diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 90b882e41..4c0e03d40 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -15,10 +15,12 @@ "settings": {}, // Add the IDs of extensions you want installed when the container is created. "extensions": [ + "ms-azuretools.vscode-docker", "dbaeumer.vscode-eslint", "svelte.svelte-vscode", "ardenivanov.svelte-intellisense", - "Prisma.prisma" + "Prisma.prisma", + "bradlc.vscode-tailwindcss" ], // Use 'forwardPorts' to make a list of ports inside the container available locally. "forwardPorts": [3000], diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 632a4655d..f96d5b45c 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,2 +1,2 @@ open_collective: coollabsio -github: coollabsio +github: coollabsio \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 326d434b3..4642a8549 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,6 +3,6 @@ contact_links: - name: 🤔 Questions and Help url: https://discord.com/invite/6rDM4fkymF about: Reach out to us on discord or our github discussions page. - - name: 🙋‍♂️ service request + - name: 🙋‍♂️ Service request url: https://feedback.coolify.io/ - about: want to request a new service? for e.g wordpress, hasura, appwrite etc... + about: Want to request a new service or build pack? For example Wordpress, Hasura, Appwrite, Angular etc... 
diff --git a/.gitpod.yml b/.gitpod.yml index 3af3611be..d46244e67 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -9,3 +9,5 @@ tasks: ports: - port: 3001 visibility: public + - port: 3000 + visibility: public diff --git a/Dockerfile b/Dockerfile index 1a2ced973..00c29b5f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM node:18-alpine as build +FROM node:18-alpine3.16 as build WORKDIR /app RUN apk add --no-cache curl @@ -9,7 +9,7 @@ RUN pnpm install RUN pnpm build # Production build -FROM node:18-alpine +FROM node:18-alpine3.16 WORKDIR /app ENV NODE_ENV production ARG TARGETPLATFORM @@ -27,8 +27,10 @@ RUN apk add --no-cache git git-lfs openssh-client curl jq cmake sqlite openssl RUN curl -sL https://unpkg.com/@pnpm/self-installer | node RUN mkdir -p ~/.docker/cli-plugins/ +# https://download.docker.com/linux/static/stable/ RUN curl -SL https://cdn.coollabs.io/bin/$TARGETPLATFORM/docker-20.10.9 -o /usr/bin/docker -RUN curl -SL https://cdn.coollabs.io/bin/$TARGETPLATFORM/docker-compose-linux-2.3.4 -o ~/.docker/cli-plugins/docker-compose +# https://github.com/docker/compose/releases +RUN curl -SL https://cdn.coollabs.io/bin/$TARGETPLATFORM/docker-compose-linux-2.7.0 -o ~/.docker/cli-plugins/docker-compose RUN chmod +x ~/.docker/cli-plugins/docker-compose /usr/bin/docker COPY --from=build /app/apps/api/build/ . diff --git a/LICENSE b/LICENSE index 29ebfa545..86c87eba2 100644 --- a/LICENSE +++ b/LICENSE @@ -1,661 +1,201 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. 
- - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. 
- - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. 
However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. 
- - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. 
- - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. 
- - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. 
- - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. 
- - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. 
There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. \ No newline at end of file + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2022] [Andras Bacsai] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index b08d7dfa6..40d7ac2dc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,32 @@ # Coolify -An open-source & self-hostable Heroku / Netlify alternative. +An open-source & self-hostable Heroku / Netlify alternative +(ARM support is in beta). + +## Financial Contributors + +Become a financial contributor and help us sustain our community. [[Contribute](https://opencollective.com/coollabsio/contribute)] + +### Individuals + + + +### Organizations + +Support this project with your organization. Your logo will show up here with a link to your website. + + + + + + + + + + + + +--- ## Live Demo @@ -12,6 +38,8 @@ https://demo.coolify.io/ If you have a new service / build pack you would like to add, raise an idea [here](https://feedback.coolify.io/) to get feedback from the community! +--- + ## How to install Installation is automated with the following command: @@ -28,60 +56,65 @@ wget -q https://get.coollabs.io/coolify/install.sh -O install.sh; sudo bash ./in For more details goto the [docs](https://docs.coollabs.io/coolify/installation). -## Features +--- -ARM support is in beta! +## Features ### Git Sources -You can use the following Git Sources to be auto-deployed to your Coolifyt instance! (Self-hosted versions are also supported.) +You can use the following Git Sources to be auto-deployed to your Coolify instance! 
(Self-hosted versions are also supported.) -- Github -- GitLab -- Bitbucket (WIP) + + ### Destinations You can deploy your applications to the following destinations: - Local Docker Engine -- Remote Docker Engine (WIP) -- Kubernetes (WIP) +- Remote Docker Engine ### Applications -These are the predefined build packs, but with the Docker build pack, you can host anything that is hostable with a single Dockerfile. +Predefined build packs to cover the basic needs to deploy applications. -- Static sites -- NodeJS -- VueJS -- NuxtJS -- NextJS -- React/Preact -- Gatsby -- Svelte -- PHP -- Laravel -- Rust -- Docker -- Python -- Deno +If you have an advanced use case, you can use the Docker build pack that allows you to deploy your application based on your custom Dockerfile. + + + + + + + + + + + + + + + + + +If you have a new build pack you would like to add, raise an idea [here](https://feedback.coolify.io/) to get feedback from the community! ### Databases One-click database is ready to be used internally or shared over the internet: -- MongoDB -- MariaDB -- MySQL -- PostgreSQL -- CouchDB -- Redis + + + + + + -### One-click services +If you have a new database you would like to add, raise an idea [here](https://feedback.coolify.io/) to get feedback from the community! -You can host cool open-source services as well: +### Services + +You quickly need to host a self-hostable, open-source service? You can do it with a few clicks! - [WordPress](https://docs.coollabs.io/coolify/services/wordpress) - [Ghost](https://ghost.org) - [Plausible Analytics](https://docs.coollabs.io/coolify/services/plausible-analytics) @@ -97,6 +130,9 @@ You can host cool open-source services as well: - [Fider](https://fider.io) - [Hasura](https://hasura.io) + +If you have a new service you would like to add, raise an idea [here](https://feedback.coolify.io/) to get feedback from the community! + ## Migration from v1 A fresh installation is necessary. v2 and v3 are not compatible with v1. 
@@ -108,9 +144,6 @@ A fresh installation is necessary. v2 and v3 are not compatible with v1. - Email: [andras@coollabs.io](mailto:andras@coollabs.io) - Discord: [Invitation](https://discord.gg/xhBCC7eGKw) -## Contribute - -See [our contribution guide](./CONTRIBUTING.md). ## License diff --git a/apps/api/package.json b/apps/api/package.json index 9a37a9366..c088cb521 100644 --- a/apps/api/package.json +++ b/apps/api/package.json @@ -1,7 +1,7 @@ { - "name": "coolify-api", + "name": "api", "description": "Coolify's Fastify API", - "license": "AGPL-3.0", + "license": "Apache-2.0", "scripts": { "db:push": "prisma db push && prisma generate", "db:seed": "prisma db seed", @@ -15,51 +15,54 @@ }, "dependencies": { "@breejs/ts-worker": "2.0.0", - "@fastify/autoload": "5.1.0", - "@fastify/cookie": "7.1.0", - "@fastify/cors": "8.0.0", - "@fastify/env": "4.0.0", - "@fastify/jwt": "6.3.1", - "@fastify/static": "6.4.0", + "@fastify/autoload": "5.2.0", + "@fastify/cookie": "7.3.1", + "@fastify/cors": "8.1.0", + "@fastify/env": "4.1.0", + "@fastify/jwt": "6.3.2", + "@fastify/static": "6.5.0", "@iarna/toml": "2.2.5", "@prisma/client": "3.15.2", "axios": "0.27.2", "bcryptjs": "2.4.3", - "bree": "9.1.1", + "bree": "9.1.2", "cabin": "9.1.2", "compare-versions": "4.1.3", "cuid": "2.1.8", - "dayjs": "1.11.3", - "dockerode": "3.3.2", + "dayjs": "1.11.4", + "dockerode": "3.3.3", "dotenv-extended": "2.9.0", - "fastify": "4.2.1", - "fastify-plugin": "4.0.0", + "fastify": "4.4.0", + "fastify-plugin": "4.1.0", "generate-password": "1.7.0", "get-port": "6.1.2", - "got": "12.1.0", - "is-ip": "4.0.0", + "got": "12.3.1", + "is-ip": "5.0.0", + "is-port-reachable": "4.0.0", "js-yaml": "4.1.0", "jsonwebtoken": "8.5.1", "node-forge": "1.3.1", "node-os-utils": "1.3.7", - "p-queue": "7.2.0", + "p-queue": "7.3.0", + "public-ip": "6.0.1", + "ssh-config": "4.1.6", "strip-ansi": "7.0.1", "unique-names-generator": "4.7.1" }, "devDependencies": { - "@types/node": "18.0.4", + "@types/node": "18.6.5", 
"@types/node-os-utils": "1.3.0", - "@typescript-eslint/eslint-plugin": "5.30.6", - "@typescript-eslint/parser": "5.30.6", - "esbuild": "0.14.49", - "eslint": "8.19.0", + "@typescript-eslint/eslint-plugin": "5.33.0", + "@typescript-eslint/parser": "5.33.0", + "esbuild": "0.15.0", + "eslint": "8.21.0", "eslint-config-prettier": "8.5.0", "eslint-plugin-prettier": "4.2.1", "nodemon": "2.0.19", "prettier": "2.7.1", "prisma": "3.15.2", "rimraf": "3.0.2", - "tsconfig-paths": "4.0.0", + "tsconfig-paths": "4.1.0", "typescript": "4.7.4" }, "prisma": { diff --git a/apps/api/prisma/migrations/20220718083646_moodle/migration.sql b/apps/api/prisma/migrations/20220718083646_moodle/migration.sql new file mode 100644 index 000000000..eaed14a4a --- /dev/null +++ b/apps/api/prisma/migrations/20220718083646_moodle/migration.sql @@ -0,0 +1,20 @@ +-- CreateTable +CREATE TABLE "Moodle" ( + "id" TEXT NOT NULL PRIMARY KEY, + "serviceId" TEXT NOT NULL, + "defaultUsername" TEXT NOT NULL, + "defaultPassword" TEXT NOT NULL, + "defaultEmail" TEXT NOT NULL, + "mariadbUser" TEXT NOT NULL, + "mariadbPassword" TEXT NOT NULL, + "mariadbRootUser" TEXT NOT NULL, + "mariadbRootUserPassword" TEXT NOT NULL, + "mariadbDatabase" TEXT NOT NULL, + "mariadbPublicPort" INTEGER, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "Moodle_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service" ("id") ON DELETE RESTRICT ON UPDATE CASCADE +); + +-- CreateIndex +CREATE UNIQUE INDEX "Moodle_serviceId_key" ON "Moodle"("serviceId"); diff --git a/apps/api/prisma/migrations/20220718114551_remote_docker_engine/migration.sql b/apps/api/prisma/migrations/20220718114551_remote_docker_engine/migration.sql new file mode 100644 index 000000000..e80df22f2 --- /dev/null +++ b/apps/api/prisma/migrations/20220718114551_remote_docker_engine/migration.sql @@ -0,0 +1,21 @@ +-- RedefineTables +PRAGMA foreign_keys=OFF; +CREATE TABLE "new_DestinationDocker" ( + "id" TEXT NOT 
NULL PRIMARY KEY, + "network" TEXT NOT NULL, + "name" TEXT NOT NULL, + "engine" TEXT, + "remoteEngine" BOOLEAN NOT NULL DEFAULT false, + "remoteIpAddress" TEXT, + "remoteUser" TEXT, + "remotePort" INTEGER, + "isCoolifyProxyUsed" BOOLEAN DEFAULT false, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); +INSERT INTO "new_DestinationDocker" ("createdAt", "engine", "id", "isCoolifyProxyUsed", "name", "network", "remoteEngine", "updatedAt") SELECT "createdAt", "engine", "id", "isCoolifyProxyUsed", "name", "network", "remoteEngine", "updatedAt" FROM "DestinationDocker"; +DROP TABLE "DestinationDocker"; +ALTER TABLE "new_DestinationDocker" RENAME TO "DestinationDocker"; +CREATE UNIQUE INDEX "DestinationDocker_network_key" ON "DestinationDocker"("network"); +PRAGMA foreign_key_check; +PRAGMA foreign_keys=ON; diff --git a/apps/api/prisma/migrations/20220721084020_ssh_key/migration.sql b/apps/api/prisma/migrations/20220721084020_ssh_key/migration.sql new file mode 100644 index 000000000..8323af409 --- /dev/null +++ b/apps/api/prisma/migrations/20220721084020_ssh_key/migration.sql @@ -0,0 +1,33 @@ +-- CreateTable +CREATE TABLE "SshKey" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "privateKey" TEXT NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- RedefineTables +PRAGMA foreign_keys=OFF; +CREATE TABLE "new_DestinationDocker" ( + "id" TEXT NOT NULL PRIMARY KEY, + "network" TEXT NOT NULL, + "name" TEXT NOT NULL, + "engine" TEXT, + "remoteEngine" BOOLEAN NOT NULL DEFAULT false, + "remoteIpAddress" TEXT, + "remoteUser" TEXT, + "remotePort" INTEGER, + "remoteVerified" BOOLEAN NOT NULL DEFAULT false, + "isCoolifyProxyUsed" BOOLEAN DEFAULT false, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + "sshKeyId" TEXT, + CONSTRAINT "DestinationDocker_sshKeyId_fkey" FOREIGN KEY ("sshKeyId") REFERENCES "SshKey" ("id") ON DELETE 
SET NULL ON UPDATE CASCADE +); +INSERT INTO "new_DestinationDocker" ("createdAt", "engine", "id", "isCoolifyProxyUsed", "name", "network", "remoteEngine", "remoteIpAddress", "remotePort", "remoteUser", "updatedAt") SELECT "createdAt", "engine", "id", "isCoolifyProxyUsed", "name", "network", "remoteEngine", "remoteIpAddress", "remotePort", "remoteUser", "updatedAt" FROM "DestinationDocker"; +DROP TABLE "DestinationDocker"; +ALTER TABLE "new_DestinationDocker" RENAME TO "DestinationDocker"; +CREATE UNIQUE INDEX "DestinationDocker_network_key" ON "DestinationDocker"("network"); +PRAGMA foreign_key_check; +PRAGMA foreign_keys=ON; diff --git a/apps/api/prisma/migrations/20220722203927_ipaddress/migration.sql b/apps/api/prisma/migrations/20220722203927_ipaddress/migration.sql new file mode 100644 index 000000000..21d45ada3 --- /dev/null +++ b/apps/api/prisma/migrations/20220722203927_ipaddress/migration.sql @@ -0,0 +1,3 @@ +-- AlterTable +ALTER TABLE "Setting" ADD COLUMN "ipv4" TEXT; +ALTER TABLE "Setting" ADD COLUMN "ipv6" TEXT; diff --git a/apps/api/prisma/migrations/20220725191205_architecture/migration.sql b/apps/api/prisma/migrations/20220725191205_architecture/migration.sql new file mode 100644 index 000000000..2e0ff3e01 --- /dev/null +++ b/apps/api/prisma/migrations/20220725191205_architecture/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "Setting" ADD COLUMN "arch" TEXT; diff --git a/apps/api/prisma/migrations/20220726121333_fix_ssh_key/migration.sql b/apps/api/prisma/migrations/20220726121333_fix_ssh_key/migration.sql new file mode 100644 index 000000000..e6e47b197 --- /dev/null +++ b/apps/api/prisma/migrations/20220726121333_fix_ssh_key/migration.sql @@ -0,0 +1,16 @@ +-- RedefineTables +PRAGMA foreign_keys=OFF; +CREATE TABLE "new_SshKey" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "privateKey" TEXT NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + "teamId" TEXT, + CONSTRAINT 
"SshKey_teamId_fkey" FOREIGN KEY ("teamId") REFERENCES "Team" ("id") ON DELETE SET NULL ON UPDATE CASCADE +); +INSERT INTO "new_SshKey" ("createdAt", "id", "name", "privateKey", "updatedAt") SELECT "createdAt", "id", "name", "privateKey", "updatedAt" FROM "SshKey"; +DROP TABLE "SshKey"; +ALTER TABLE "new_SshKey" RENAME TO "SshKey"; +PRAGMA foreign_key_check; +PRAGMA foreign_keys=ON; diff --git a/apps/api/prisma/migrations/20220806090621_fqdn_not_unique_anymore/migration.sql b/apps/api/prisma/migrations/20220806090621_fqdn_not_unique_anymore/migration.sql new file mode 100644 index 000000000..f1eab666b --- /dev/null +++ b/apps/api/prisma/migrations/20220806090621_fqdn_not_unique_anymore/migration.sql @@ -0,0 +1,2 @@ +-- DropIndex +DROP INDEX "Application_fqdn_key"; diff --git a/apps/api/prisma/migrations/20220806102340_rde_ssh_local_port/migration.sql b/apps/api/prisma/migrations/20220806102340_rde_ssh_local_port/migration.sql new file mode 100644 index 000000000..ae4c39846 --- /dev/null +++ b/apps/api/prisma/migrations/20220806102340_rde_ssh_local_port/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "DestinationDocker" ADD COLUMN "sshLocalPort" INTEGER; diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 8d1e0bbfe..7d50b63b8 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -23,6 +23,9 @@ model Setting { isTraefikUsed Boolean @default(true) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + ipv4 String? + ipv6 String? + arch String? } model User { @@ -30,39 +33,40 @@ model User { email String @unique type String password String? 
- teams Team[] - permission Permission[] createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + permission Permission[] + teams Team[] } model Permission { id String @id @default(cuid()) - user User @relation(fields: [userId], references: [id]) userId String - team Team @relation(fields: [teamId], references: [id]) teamId String permission String createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + team Team @relation(fields: [teamId], references: [id]) + user User @relation(fields: [userId], references: [id]) } model Team { id String @id @default(cuid()) - users User[] name String? + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + databaseId String? + serviceId String? + permissions Permission[] + sshKey SshKey[] applications Application[] + database Database[] + destinationDocker DestinationDocker[] gitSources GitSource[] gitHubApps GithubApp[] gitLabApps GitlabApp[] - destinationDocker DestinationDocker[] - permissions Permission[] - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - database Database[] @relation(references: [id]) - databaseId String? - service Service[] @relation(references: [id]) - serviceId String? + service Service[] + users User[] } model TeamInvitation { @@ -78,7 +82,7 @@ model TeamInvitation { model Application { id String @id @default(cuid()) name String - fqdn String? @unique + fqdn String? repository String? configHash String? branch String? @@ -101,21 +105,20 @@ model Application { denoOptions String? createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - settings ApplicationSettings? - teams Team[] destinationDockerId String? - destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) gitSourceId String? - gitSource GitSource? @relation(fields: [gitSourceId], references: [id]) - secrets Secret[] - persistentStorage ApplicationPersistentStorage[] baseImage String? baseBuildImage String? + gitSource GitSource? 
@relation(fields: [gitSourceId], references: [id]) + destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) + persistentStorage ApplicationPersistentStorage[] + settings ApplicationSettings? + secrets Secret[] + teams Team[] } model ApplicationSettings { id String @id @default(cuid()) - application Application @relation(fields: [applicationId], references: [id]) applicationId String @unique dualCerts Boolean @default(false) debug Boolean @default(false) @@ -123,26 +126,27 @@ model ApplicationSettings { autodeploy Boolean @default(true) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + application Application @relation(fields: [applicationId], references: [id]) } model ApplicationPersistentStorage { id String @id @default(cuid()) - application Application @relation(fields: [applicationId], references: [id]) applicationId String path String createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + application Application @relation(fields: [applicationId], references: [id]) @@unique([applicationId, path]) } model ServicePersistentStorage { id String @id @default(cuid()) - service Service @relation(fields: [serviceId], references: [id]) serviceId String path String createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) @@unique([serviceId, path]) } @@ -155,8 +159,8 @@ model Secret { isBuildSecret Boolean @default(false) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - application Application @relation(fields: [applicationId], references: [id]) applicationId String + application Application @relation(fields: [applicationId], references: [id]) @@unique([name, applicationId, isPRMRSecret]) } @@ -167,8 +171,8 @@ model ServiceSecret { value String createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - service Service @relation(fields: [serviceId], references: [id]) serviceId String + service Service 
@relation(fields: [serviceId], references: [id]) @@unique([name, serviceId]) } @@ -200,21 +204,38 @@ model DestinationDocker { id String @id @default(cuid()) network String @unique name String - engine String + engine String? remoteEngine Boolean @default(false) + remoteIpAddress String? + remoteUser String? + remotePort Int? + remoteVerified Boolean @default(false) isCoolifyProxyUsed Boolean? @default(false) - teams Team[] - application Application[] createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + sshKeyId String? + sshKey SshKey? @relation(fields: [sshKeyId], references: [id]) + sshLocalPort Int? + application Application[] database Database[] service Service[] + teams Team[] +} + +model SshKey { + id String @id @default(cuid()) + name String + privateKey String + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + teamId String? + team Team? @relation(fields: [teamId], references: [id]) + destinationDocker DestinationDocker[] } model GitSource { id String @id @default(cuid()) name String - teams Team[] type String? apiUrl String? htmlUrl String? @@ -223,16 +244,16 @@ model GitSource { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt githubAppId String? @unique - githubApp GithubApp? @relation(fields: [githubAppId], references: [id]) - application Application[] gitlabAppId String? @unique gitlabApp GitlabApp? @relation(fields: [gitlabAppId], references: [id]) + githubApp GithubApp? @relation(fields: [githubAppId], references: [id]) + application Application[] + teams Team[] } model GithubApp { id String @id @default(cuid()) name String? @unique - teams Team[] appId Int? installationId Int? clientId String? @@ -242,13 +263,13 @@ model GithubApp { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt gitSource GitSource? + teams Team[] } model GitlabApp { id String @id @default(cuid()) oauthId Int @unique groupName String? @unique - teams Team[] deployKeyId Int? privateSshKey String? 
publicSshKey String? @@ -258,6 +279,7 @@ model GitlabApp { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt gitSource GitSource? + teams Team[] } model Database { @@ -271,22 +293,22 @@ model Database { dbUserPassword String? rootUser String? rootUserPassword String? - settings DatabaseSettings? - destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) destinationDockerId String? - teams Team[] createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) + settings DatabaseSettings? + teams Team[] } model DatabaseSettings { id String @id @default(cuid()) - database Database @relation(fields: [databaseId], references: [id]) databaseId String @unique isPublic Boolean @default(false) appendOnly Boolean @default(true) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + database Database @relation(fields: [databaseId], references: [id]) } model Service { @@ -297,23 +319,23 @@ model Service { dualCerts Boolean @default(false) type String? version String? - teams Team[] destinationDockerId String? - destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - plausibleAnalytics PlausibleAnalytics? + destinationDocker DestinationDocker? @relation(fields: [destinationDockerId], references: [id]) + fider Fider? + ghost Ghost? + hasura Hasura? + meiliSearch MeiliSearch? minio Minio? + moodle Moodle? + plausibleAnalytics PlausibleAnalytics? + persistentStorage ServicePersistentStorage[] + serviceSecret ServiceSecret[] + umami Umami? vscodeserver Vscodeserver? wordpress Wordpress? - ghost Ghost? - serviceSecret ServiceSecret[] - meiliSearch MeiliSearch? - persistentStorage ServicePersistentStorage[] - umami Umami? - hasura Hasura? - fider Fider? - moodle Moodle? 
+ teams Team[] } model PlausibleAnalytics { @@ -328,9 +350,9 @@ model PlausibleAnalytics { secretKeyBase String? scriptName String @default("plausible.js") serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Minio { @@ -340,18 +362,18 @@ model Minio { publicPort Int? apiFqdn String? serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Vscodeserver { id String @id @default(cuid()) password String serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Wordpress { @@ -374,9 +396,9 @@ model Wordpress { ftpHostKey String? ftpHostKeyPrivate String? serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Ghost { @@ -390,18 +412,18 @@ model Ghost { mariadbDatabase String? mariadbPublicPort Int? 
serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model MeiliSearch { id String @id @default(cuid()) masterKey String serviceId String @unique - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Umami { @@ -413,9 +435,9 @@ model Umami { postgresqlPublicPort Int? umamiAdminPassword String hashSalt String - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Hasura { @@ -426,9 +448,9 @@ model Hasura { postgresqlDatabase String postgresqlPublicPort Int? graphQLAdminPassword String - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Fider { @@ -448,9 +470,9 @@ model Fider { emailSmtpUser String? emailSmtpPassword String? emailSmtpEnableStartTls Boolean @default(false) - service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } model Moodle { @@ -465,7 +487,7 @@ model Moodle { mariadbRootUserPassword String mariadbDatabase String mariadbPublicPort Int? 
- service Service @relation(fields: [serviceId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + service Service @relation(fields: [serviceId], references: [id]) } diff --git a/apps/api/prisma/seed.js b/apps/api/prisma/seed.js index 78a625e17..96b55b105 100644 --- a/apps/api/prisma/seed.js +++ b/apps/api/prisma/seed.js @@ -24,7 +24,8 @@ async function main() { data: { isRegistrationEnabled: true, proxyPassword: encrypt(generatePassword()), - proxyUser: cuid() + proxyUser: cuid(), + arch: process.arch } }); } else { diff --git a/apps/api/src/index.ts b/apps/api/src/index.ts index e302df635..60f83a50f 100644 --- a/apps/api/src/index.ts +++ b/apps/api/src/index.ts @@ -5,7 +5,7 @@ import env from '@fastify/env'; import cookie from '@fastify/cookie'; import path, { join } from 'path'; import autoLoad from '@fastify/autoload'; -import { asyncExecShell, isDev, prisma } from './lib/common'; +import { asyncExecShell, isDev, listSettings, prisma } from './lib/common'; import { scheduler } from './lib/scheduler'; declare module 'fastify' { @@ -101,10 +101,10 @@ fastify.listen({ port, host }, async (err: any, address: any) => { process.exit(1); } console.log(`Coolify's API is listening on ${host}:${port}`); - await initServer() + await initServer(); await scheduler.start('deployApplication'); await scheduler.start('cleanupStorage'); - await scheduler.start('checkProxies') + await scheduler.start('checkProxies'); // Check if no build is running @@ -130,12 +130,37 @@ fastify.listen({ port, host }, async (err: any, address: any) => { if (!scheduler.workers.has('deployApplication')) await scheduler.start('deployApplication'); } }); + await getArch(); + await getIPAddress(); }); +async function getIPAddress() { + const { publicIpv4, publicIpv6 } = await import('public-ip') + try { + const settings = await listSettings(); + if (!settings.ipv4) { + const ipv4 = await publicIpv4({ timeout: 2000 }) + await prisma.setting.update({ where: { id: 
settings.id }, data: { ipv4 } }) + } + if (!settings.ipv6) { + const ipv6 = await publicIpv6({ timeout: 2000 }) + await prisma.setting.update({ where: { id: settings.id }, data: { ipv6 } }) + } + + } catch (error) { } +} async function initServer() { try { await asyncExecShell(`docker network create --attachable coolify`); } catch (error) { } } +async function getArch() { + try { + const settings = await prisma.setting.findFirst({}) + if (settings && !settings.arch) { + await prisma.setting.update({ where: { id: settings.id }, data: { arch: process.arch } }) + } + } catch (error) { } +} diff --git a/apps/api/src/jobs/checkProxies.ts b/apps/api/src/jobs/checkProxies.ts index 6168baf5a..761554061 100644 --- a/apps/api/src/jobs/checkProxies.ts +++ b/apps/api/src/jobs/checkProxies.ts @@ -1,24 +1,24 @@ import { parentPort } from 'node:worker_threads'; -import { prisma, startTraefikTCPProxy, generateDatabaseConfiguration, startTraefikProxy, asyncExecShell } from '../lib/common'; -import { checkContainer, getEngine } from '../lib/docker'; +import { prisma, startTraefikTCPProxy, generateDatabaseConfiguration, startTraefikProxy, executeDockerCmd } from '../lib/common'; +import { checkContainer } from '../lib/docker'; (async () => { if (parentPort) { - // Coolify Proxy + // Coolify Proxy local const engine = '/var/run/docker.sock'; const localDocker = await prisma.destinationDocker.findFirst({ where: { engine, network: 'coolify' } }); if (localDocker && localDocker.isCoolifyProxyUsed) { // Remove HAProxy - const found = await checkContainer(engine, 'coolify-haproxy'); - const host = getEngine(engine); + const found = await checkContainer({ dockerId: localDocker.id, container: 'coolify-haproxy' }); if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker stop -t 0 coolify-haproxy && docker rm coolify-haproxy` - ); + await executeDockerCmd({ + dockerId: localDocker.id, + command: `docker stop -t 0 coolify-haproxy && docker rm coolify-haproxy` + }) } - await 
startTraefikProxy(engine); + await startTraefikProxy(localDocker.id); } @@ -32,12 +32,14 @@ import { checkContainer, getEngine } from '../lib/docker'; if (destinationDockerId && destinationDocker.isCoolifyProxyUsed) { const { privatePort } = generateDatabaseConfiguration(database); // Remove HAProxy - const found = await checkContainer(engine, `haproxy-for-${publicPort}`); - const host = getEngine(engine); + const found = await checkContainer({ + dockerId: localDocker.id, container: `haproxy-for-${publicPort}` + }); if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker stop -t 0 haproxy-for-${publicPort} && docker rm haproxy-for-${publicPort}` - ); + await executeDockerCmd({ + dockerId: localDocker.id, + command: `docker stop -t 0 haproxy-for-${publicPort} && docker rm haproxy-for-${publicPort}` + }) } await startTraefikTCPProxy(destinationDocker, id, publicPort, privatePort); @@ -52,12 +54,12 @@ import { checkContainer, getEngine } from '../lib/docker'; const { destinationDockerId, destinationDocker, id } = service; if (destinationDockerId && destinationDocker.isCoolifyProxyUsed) { // Remove HAProxy - const found = await checkContainer(engine, `haproxy-for-${ftpPublicPort}`); - const host = getEngine(engine); + const found = await checkContainer({ dockerId: localDocker.id, container: `haproxy-for-${ftpPublicPort}` }); if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker stop -t 0 haproxy-for-${ftpPublicPort} && docker rm haproxy-for-${ftpPublicPort} ` - ); + await executeDockerCmd({ + dockerId: localDocker.id, + command: `docker stop -t 0 haproxy -for-${ftpPublicPort} && docker rm haproxy-for-${ftpPublicPort}` + }) } await startTraefikTCPProxy(destinationDocker, id, ftpPublicPort, 22, 'wordpressftp'); } @@ -73,12 +75,12 @@ import { checkContainer, getEngine } from '../lib/docker'; const { destinationDockerId, destinationDocker, id } = service; if (destinationDockerId && destinationDocker.isCoolifyProxyUsed) { // Remove HAProxy - 
const found = await checkContainer(engine, `${id}-${publicPort}`); - const host = getEngine(engine); + const found = await checkContainer({ dockerId: localDocker.id, container: `${id}-${publicPort}` }); if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker stop -t 0 ${id}-${publicPort} && docker rm ${id}-${publicPort}` - ); + await executeDockerCmd({ + dockerId: localDocker.id, + command: `docker stop -t 0 ${id}-${publicPort} && docker rm ${id}-${publicPort} ` + }) } await startTraefikTCPProxy(destinationDocker, id, publicPort, 9000); } diff --git a/apps/api/src/jobs/cleanupStorage.ts b/apps/api/src/jobs/cleanupStorage.ts index dd8636175..97683ac2d 100644 --- a/apps/api/src/jobs/cleanupStorage.ts +++ b/apps/api/src/jobs/cleanupStorage.ts @@ -1,20 +1,20 @@ import { parentPort } from 'node:worker_threads'; -import { asyncExecShell, cleanupDockerStorage, isDev, prisma, version } from '../lib/common'; -import { getEngine } from '../lib/docker'; +import { asyncExecShell, cleanupDockerStorage, executeDockerCmd, isDev, prisma, version } from '../lib/common'; (async () => { if (parentPort) { const destinationDockers = await prisma.destinationDocker.findMany(); - const engines = [...new Set(destinationDockers.map(({ engine }) => engine))]; - for (const engine of engines) { + let enginesDone = new Set() + for (const destination of destinationDockers) { + if (enginesDone.has(destination.engine) || enginesDone.has(destination.remoteIpAddress)) return + if (destination.engine) enginesDone.add(destination.engine) + if (destination.remoteIpAddress) enginesDone.add(destination.remoteIpAddress) + let lowDiskSpace = false; - const host = getEngine(engine); try { let stdout = null if (!isDev) { - const output = await asyncExecShell( - `DOCKER_HOST=${host} docker exec coolify sh -c 'df -kPT /'` - ); + const output = await executeDockerCmd({ dockerId: destination.id, command: `CONTAINER=$(docker ps -lq | head -1) && docker exec $CONTAINER sh -c 'df -kPT /'` }) stdout = 
output.stdout; } else { const output = await asyncExecShell( @@ -53,7 +53,7 @@ import { getEngine } from '../lib/docker'; } catch (error) { console.log(error); } - await cleanupDockerStorage(host, lowDiskSpace, false) + await cleanupDockerStorage(destination.id, lowDiskSpace, false) } await prisma.$disconnect(); } else process.exit(0); diff --git a/apps/api/src/jobs/deployApplication.ts b/apps/api/src/jobs/deployApplication.ts index b5dbe24d9..e31ccf149 100644 --- a/apps/api/src/jobs/deployApplication.ts +++ b/apps/api/src/jobs/deployApplication.ts @@ -4,8 +4,7 @@ import fs from 'fs/promises'; import yaml from 'js-yaml'; import { copyBaseConfigurationFiles, makeLabelForStandaloneApplication, saveBuildLog, setDefaultConfiguration } from '../lib/buildPacks/common'; -import { asyncExecShell, createDirectories, decrypt, getDomain, prisma } from '../lib/common'; -import { dockerInstance, getEngine } from '../lib/docker'; +import { createDirectories, decrypt, executeDockerCmd, getDomain, prisma } from '../lib/common'; import * as importers from '../lib/importers'; import * as buildpacks from '../lib/buildPacks'; @@ -104,9 +103,6 @@ import * as buildpacks from '../lib/buildPacks'; destinationType = 'docker'; } if (destinationType === 'docker') { - const docker = dockerInstance({ destinationDocker }); - const host = getEngine(destinationDocker.engine); - await prisma.build.update({ where: { id: buildId }, data: { status: 'running' } }); const { workdir, repodir } = await createDirectories({ repository, buildId }); const configuration = await setDefaultConfiguration(message); @@ -185,18 +181,23 @@ import * as buildpacks from '../lib/buildPacks'; } else { deployNeeded = true; } - const image = await docker.engine.getImage(`${applicationId}:${tag}`); + let imageFound = false; try { - await image.inspect(); + await executeDockerCmd({ + dockerId: destinationDocker.id, + command: `docker image inspect ${applicationId}:${tag}` + }) imageFound = true; } catch (error) { // } - if 
(!imageFound || deployNeeded) { + // if (!imageFound || deployNeeded) { + if (true) { await copyBaseConfigurationFiles(buildPack, workdir, buildId, applicationId, baseImage); if (buildpacks[buildPack]) await buildpacks[buildPack]({ + dockerId: destinationDocker.id, buildId, applicationId, domain, @@ -212,7 +213,6 @@ import * as buildpacks from '../lib/buildPacks'; commit, tag, workdir, - docker, port: exposePort ? `${exposePort}:${port}` : port, installCommand, buildCommand, @@ -238,8 +238,8 @@ import * as buildpacks from '../lib/buildPacks'; await saveBuildLog({ line: 'Build image already available - no rebuild required.', buildId, applicationId }); } try { - await asyncExecShell(`DOCKER_HOST=${host} docker stop -t 0 ${imageId}`); - await asyncExecShell(`DOCKER_HOST=${host} docker rm ${imageId}`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker stop -t 0 ${imageId}` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker rm ${imageId}` }) } catch (error) { // } @@ -299,7 +299,7 @@ import * as buildpacks from '../lib/buildPacks'; container_name: imageId, volumes, env_file: envFound ? 
[`${workdir}/.env`] : [], - networks: [docker.network], + networks: [destinationDocker.network], labels, depends_on: [], restart: 'always', @@ -318,16 +318,14 @@ import * as buildpacks from '../lib/buildPacks'; } }, networks: { - [docker.network]: { + [destinationDocker.network]: { external: true } }, volumes: Object.assign({}, ...composeVolumes) }; await fs.writeFile(`${workdir}/docker-compose.yml`, yaml.dump(composeFile)); - await asyncExecShell( - `DOCKER_HOST=${host} docker compose --project-directory ${workdir} up -d` - ); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose --project-directory ${workdir} up -d` }) await saveBuildLog({ line: 'Deployment successful!', buildId, applicationId }); } catch (error) { await saveBuildLog({ line: error, buildId, applicationId }); diff --git a/apps/api/src/lib/buildPacks/common.ts b/apps/api/src/lib/buildPacks/common.ts index 1feb33822..3c34c5f31 100644 --- a/apps/api/src/lib/buildPacks/common.ts +++ b/apps/api/src/lib/buildPacks/common.ts @@ -1,7 +1,7 @@ -import { asyncExecShell, base64Encode, generateTimestamp, getDomain, isDev, prisma, version } from "../common"; -import { scheduler } from "../scheduler"; +import { base64Encode, executeDockerCmd, generateTimestamp, getDomain, isDev, prisma, version } from "../common"; import { promises as fs } from 'fs'; import { day } from "../dayjs"; + const staticApps = ['static', 'react', 'vuejs', 'svelte', 'gatsby', 'astro', 'eleventy']; const nodeBased = [ 'react', @@ -511,8 +511,8 @@ export async function buildImage({ applicationId, tag, workdir, - docker, buildId, + dockerId, isCache = false, debug = false, dockerFileLocation = '/Dockerfile' @@ -522,6 +522,9 @@ export async function buildImage({ } else { await saveBuildLog({ line: `Building image started.`, buildId, applicationId }); } + if (debug) { + await saveBuildLog({ line: `\n###############\nIMPORTANT: Due to some issues during implementing Remote Docker Engine, the builds logs are not 
streamed at the moment. You will see the full build log when the build is finished!\n###############`, buildId, applicationId }); + } if (!debug && isCache) { await saveBuildLog({ line: `Debug turned off. To see more details, allow it in the configuration.`, @@ -529,16 +532,61 @@ export async function buildImage({ applicationId }); } - - const stream = await docker.engine.buildImage( - { src: ['.'], context: workdir }, - { - dockerfile: isCache ? `${dockerFileLocation}-cache` : dockerFileLocation, - t: `${applicationId}:${tag}${isCache ? '-cache' : ''}` + const dockerFile = isCache ? `${dockerFileLocation}-cache` : `${dockerFileLocation}` + const cache = `${applicationId}:${tag}${isCache ? '-cache' : ''}` + const { stderr } = await executeDockerCmd({ dockerId, command: `docker build --progress plain -f ${workdir}/${dockerFile} -t ${cache} ${workdir}` }) + if (debug) { + const array = stderr.split('\n') + for (const line of array) { + if (line !== '\n') { + await saveBuildLog({ + line: `${line.replace('\n', '')}`, + buildId, + applicationId + }); + } } - ); - await streamEvents({ stream, docker, buildId, applicationId, debug }); - await saveBuildLog({ line: `Building image successful!`, buildId, applicationId }); + } + + + // await new Promise((resolve, reject) => { + // const command = spawn(`docker`, ['build', '-f', `${workdir}${dockerFile}`, '-t', `${cache}`,`${workdir}`], { + // env: { + // DOCKER_HOST: 'ssh://root@95.217.178.202', + // DOCKER_BUILDKIT: '1' + // } + // }); + // command.stdout.on('data', function (data) { + // console.log('stdout: ' + data); + // }); + // command.stderr.on('data', function (data) { + // console.log('stderr: ' + data); + // }); + // command.on('error', function (error) { + // console.log(error) + // reject(error) + // }) + // command.on('exit', function (code) { + // console.log('exit code: ' + code); + // resolve(code) + // }); + // }) + + + // console.log({ stdout, stderr }) + // const stream = await docker.engine.buildImage( + 
// { src: ['.'], context: workdir }, + // { + // dockerfile: isCache ? `${dockerFileLocation}-cache` : dockerFileLocation, + // t: `${applicationId}:${tag}${isCache ? '-cache' : ''}` + // } + // ); + // await streamEvents({ stream, docker, buildId, applicationId, debug }); + if (isCache) { + await saveBuildLog({ line: `Building cache image successful.`, buildId, applicationId }); + } else { + await saveBuildLog({ line: `Building image successful.`, buildId, applicationId }); + } } export async function streamEvents({ stream, docker, buildId, applicationId, debug }) { @@ -617,18 +665,16 @@ export function makeLabelForStandaloneApplication({ export async function buildCacheImageWithNode(data, imageForBuild) { const { - applicationId, - tag, workdir, - docker, buildId, baseDirectory, installCommand, buildCommand, - debug, secrets, pullmergeRequestId } = data; + + const isPnpm = checkPnpm(installCommand, buildCommand); const Dockerfile: Array = []; Dockerfile.push(`FROM ${imageForBuild}`); @@ -659,11 +705,12 @@ export async function buildCacheImageWithNode(data, imageForBuild) { Dockerfile.push(`COPY .${baseDirectory || ''} ./`); Dockerfile.push(`RUN ${buildCommand}`); await fs.writeFile(`${workdir}/Dockerfile-cache`, Dockerfile.join('\n')); - await buildImage({ applicationId, tag, workdir, docker, buildId, isCache: true, debug }); + await buildImage({ ...data, isCache: true }); } export async function buildCacheImageForLaravel(data, imageForBuild) { - const { applicationId, tag, workdir, docker, buildId, debug, secrets, pullmergeRequestId } = data; + const { workdir, buildId, secrets, pullmergeRequestId } = data; + const Dockerfile: Array = []; Dockerfile.push(`FROM ${imageForBuild}`); Dockerfile.push('WORKDIR /app'); @@ -687,22 +734,16 @@ export async function buildCacheImageForLaravel(data, imageForBuild) { Dockerfile.push(`COPY resources /app/resources`); Dockerfile.push(`RUN yarn install && yarn production`); await fs.writeFile(`${workdir}/Dockerfile-cache`, 
Dockerfile.join('\n')); - await buildImage({ applicationId, tag, workdir, docker, buildId, isCache: true, debug }); + await buildImage({ ...data, isCache: true }); } export async function buildCacheImageWithCargo(data, imageForBuild) { const { applicationId, - tag, workdir, - docker, buildId, - baseDirectory, - installCommand, - buildCommand, - debug, - secrets } = data; + const Dockerfile: Array = []; Dockerfile.push(`FROM ${imageForBuild} as planner-${applicationId}`); Dockerfile.push(`LABEL coolify.buildId=${buildId}`); @@ -717,5 +758,5 @@ export async function buildCacheImageWithCargo(data, imageForBuild) { Dockerfile.push(`COPY --from=planner-${applicationId} /app/recipe.json recipe.json`); Dockerfile.push('RUN cargo chef cook --release --recipe-path recipe.json'); await fs.writeFile(`${workdir}/Dockerfile-cache`, Dockerfile.join('\n')); - await buildImage({ applicationId, tag, workdir, docker, buildId, isCache: true, debug }); + await buildImage({ ...data, isCache: true }); } \ No newline at end of file diff --git a/apps/api/src/lib/buildPacks/docker.ts b/apps/api/src/lib/buildPacks/docker.ts index 88d900c38..04041b190 100644 --- a/apps/api/src/lib/buildPacks/docker.ts +++ b/apps/api/src/lib/buildPacks/docker.ts @@ -1,18 +1,18 @@ import { promises as fs } from 'fs'; import { buildImage } from './common'; -export default async function ({ - applicationId, - debug, - tag, - workdir, - docker, - buildId, - baseDirectory, - secrets, - pullmergeRequestId, - dockerFileLocation -}) { +export default async function (data) { + let { + applicationId, + debug, + tag, + workdir, + buildId, + baseDirectory, + secrets, + pullmergeRequestId, + dockerFileLocation + } = data try { const file = `${workdir}${dockerFileLocation}`; let dockerFileOut = `${workdir}`; @@ -45,7 +45,7 @@ export default async function ({ } await fs.writeFile(`${dockerFileOut}${dockerFileLocation}`, Dockerfile.join('\n')); - await buildImage({ applicationId, tag, workdir, docker, buildId, debug, 
dockerFileLocation }); + await buildImage(data); } catch (error) { throw error; } diff --git a/apps/api/src/lib/common.ts b/apps/api/src/lib/common.ts index abf409d6b..7de37be56 100644 --- a/apps/api/src/lib/common.ts +++ b/apps/api/src/lib/common.ts @@ -10,12 +10,14 @@ import crypto from 'crypto'; import { promises as dns } from 'dns'; import { PrismaClient } from '@prisma/client'; import cuid from 'cuid'; +import os from 'os'; +import sshConfig from 'ssh-config' -import { checkContainer, getEngine, removeContainer } from './docker'; +import { checkContainer, removeContainer } from './docker'; import { day } from './dayjs'; import * as serviceFields from './serviceFields' -export const version = '3.1.4'; +export const version = '3.2.0'; export const isDev = process.env.NODE_ENV === 'development'; const algorithm = 'aes-256-ctr'; @@ -29,21 +31,28 @@ const customConfig: Config = { export const defaultProxyImage = `coolify-haproxy-alpine:latest`; export const defaultProxyImageTcp = `coolify-haproxy-tcp-alpine:latest`; export const defaultProxyImageHttp = `coolify-haproxy-http-alpine:latest`; -export const defaultTraefikImage = `traefik:v2.6`; +export const defaultTraefikImage = `traefik:v2.8`; export function getAPIUrl() { if (process.env.GITPOD_WORKSPACE_URL) { const { href } = new URL(process.env.GITPOD_WORKSPACE_URL) const newURL = href.replace('https://', 'https://3001-').replace(/\/$/, '') return newURL } + if (process.env.CODESANDBOX_HOST) { + return `https://${process.env.CODESANDBOX_HOST.replace(/\$PORT/,'3001')}` + } return isDev ? 
'http://localhost:3001' : 'http://localhost:3000'; } + export function getUIUrl() { if (process.env.GITPOD_WORKSPACE_URL) { const { href } = new URL(process.env.GITPOD_WORKSPACE_URL) const newURL = href.replace('https://', 'https://3000-').replace(/\/$/, '') return newURL } + if (process.env.CODESANDBOX_HOST) { + return `https://${process.env.CODESANDBOX_HOST.replace(/\$PORT/,'3000')}` + } return 'http://localhost:3000'; } @@ -113,164 +122,164 @@ export const encrypt = (text: string) => { }; export const supportedServiceTypesAndVersions = [ - { - name: 'plausibleanalytics', - fancyName: 'Plausible Analytics', - baseImage: 'plausible/analytics', - images: ['bitnami/postgresql:13.2.0', 'yandex/clickhouse-server:21.3.2.5'], - versions: ['latest', 'stable'], - recommendedVersion: 'stable', - ports: { - main: 8000 - } - }, - { - name: 'nocodb', - fancyName: 'NocoDB', - baseImage: 'nocodb/nocodb', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 8080 - } - }, - { - name: 'minio', - fancyName: 'MinIO', - baseImage: 'minio/minio', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 9001 - } - }, - { - name: 'vscodeserver', - fancyName: 'VSCode Server', - baseImage: 'codercom/code-server', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 8080 - } - }, - { - name: 'wordpress', - fancyName: 'Wordpress', - baseImage: 'wordpress', - images: ['bitnami/mysql:5.7'], - versions: ['latest', 'php8.1', 'php8.0', 'php7.4', 'php7.3'], - recommendedVersion: 'latest', - ports: { - main: 80 - } - }, - { - name: 'vaultwarden', - fancyName: 'Vaultwarden', - baseImage: 'vaultwarden/server', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 80 - } - }, - { - name: 'languagetool', - fancyName: 'LanguageTool', - baseImage: 'silviof/docker-languagetool', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 8010 - } - }, - { - name: 'n8n', - fancyName: 'n8n', - baseImage: 
'n8nio/n8n', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 5678 - } - }, - { - name: 'uptimekuma', - fancyName: 'Uptime Kuma', - baseImage: 'louislam/uptime-kuma', - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 3001 - } - }, - { - name: 'ghost', - fancyName: 'Ghost', - baseImage: 'bitnami/ghost', - images: ['bitnami/mariadb'], - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 2368 - } - }, - { - name: 'meilisearch', - fancyName: 'Meilisearch', - baseImage: 'getmeili/meilisearch', - images: [], - versions: ['latest'], - recommendedVersion: 'latest', - ports: { - main: 7700 - } - }, - { - name: 'umami', - fancyName: 'Umami', - baseImage: 'ghcr.io/mikecao/umami', - images: ['postgres:12-alpine'], - versions: ['postgresql-latest'], - recommendedVersion: 'postgresql-latest', - ports: { - main: 3000 - } - }, - { - name: 'hasura', - fancyName: 'Hasura', - baseImage: 'hasura/graphql-engine', - images: ['postgres:12-alpine'], - versions: ['latest', 'v2.5.1'], - recommendedVersion: 'v2.5.1', - ports: { - main: 8080 - } - }, - { - name: 'fider', - fancyName: 'Fider', - baseImage: 'getfider/fider', - images: ['postgres:12-alpine'], - versions: ['stable'], - recommendedVersion: 'stable', - ports: { - main: 3000 - } - }, - // { - // name: 'moodle', - // fancyName: 'Moodle', - // baseImage: 'bitnami/moodle', - // images: [], - // versions: ['latest', 'v4.0.2'], - // recommendedVersion: 'latest', - // ports: { - // main: 8080 - // } - // } + { + name: 'plausibleanalytics', + fancyName: 'Plausible Analytics', + baseImage: 'plausible/analytics', + images: ['bitnami/postgresql:13.2.0', 'yandex/clickhouse-server:21.3.2.5'], + versions: ['latest', 'stable'], + recommendedVersion: 'stable', + ports: { + main: 8000 + } + }, + { + name: 'nocodb', + fancyName: 'NocoDB', + baseImage: 'nocodb/nocodb', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 8080 + } + }, + { + name: 'minio', + 
fancyName: 'MinIO', + baseImage: 'minio/minio', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 9001 + } + }, + { + name: 'vscodeserver', + fancyName: 'VSCode Server', + baseImage: 'codercom/code-server', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 8080 + } + }, + { + name: 'wordpress', + fancyName: 'Wordpress', + baseImage: 'wordpress', + images: ['bitnami/mysql:5.7'], + versions: ['latest', 'php8.1', 'php8.0', 'php7.4', 'php7.3'], + recommendedVersion: 'latest', + ports: { + main: 80 + } + }, + { + name: 'vaultwarden', + fancyName: 'Vaultwarden', + baseImage: 'vaultwarden/server', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 80 + } + }, + { + name: 'languagetool', + fancyName: 'LanguageTool', + baseImage: 'silviof/docker-languagetool', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 8010 + } + }, + { + name: 'n8n', + fancyName: 'n8n', + baseImage: 'n8nio/n8n', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 5678 + } + }, + { + name: 'uptimekuma', + fancyName: 'Uptime Kuma', + baseImage: 'louislam/uptime-kuma', + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 3001 + } + }, + { + name: 'ghost', + fancyName: 'Ghost', + baseImage: 'bitnami/ghost', + images: ['bitnami/mariadb'], + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 2368 + } + }, + { + name: 'meilisearch', + fancyName: 'Meilisearch', + baseImage: 'getmeili/meilisearch', + images: [], + versions: ['latest'], + recommendedVersion: 'latest', + ports: { + main: 7700 + } + }, + { + name: 'umami', + fancyName: 'Umami', + baseImage: 'ghcr.io/mikecao/umami', + images: ['postgres:12-alpine'], + versions: ['postgresql-latest'], + recommendedVersion: 'postgresql-latest', + ports: { + main: 3000 + } + }, + { + name: 'hasura', + fancyName: 'Hasura', + baseImage: 'hasura/graphql-engine', + images: ['postgres:12-alpine'], + 
versions: ['latest', 'v2.8.4', 'v2.5.1'], + recommendedVersion: 'v2.8.4', + ports: { + main: 8080 + } + }, + { + name: 'fider', + fancyName: 'Fider', + baseImage: 'getfider/fider', + images: ['postgres:12-alpine'], + versions: ['stable'], + recommendedVersion: 'stable', + ports: { + main: 3000 + } + }, + // { + // name: 'moodle', + // fancyName: 'Moodle', + // baseImage: 'bitnami/moodle', + // images: [], + // versions: ['latest', 'v4.0.2'], + // recommendedVersion: 'latest', + // ports: { + // main: 8080 + // } + // } ]; export async function checkDoubleBranch(branch: string, projectId: number): Promise { @@ -315,11 +324,13 @@ export function getDomain(domain: string): string { export async function isDomainConfigured({ id, fqdn, - checkOwn = false + checkOwn = false, + remoteIpAddress = undefined }: { id: string; fqdn: string; checkOwn?: boolean; + remoteIpAddress?: string; }): Promise { const domain = getDomain(fqdn); const nakedDomain = domain.replace('www.', ''); @@ -329,7 +340,10 @@ export async function isDomainConfigured({ { fqdn: { endsWith: `//${nakedDomain}` } }, { fqdn: { endsWith: `//www.${nakedDomain}` } } ], - id: { not: id } + id: { not: id }, + destinationDocker: { + remoteIpAddress, + } }, select: { fqdn: true } }); @@ -341,7 +355,10 @@ export async function isDomainConfigured({ { minio: { apiFqdn: { endsWith: `//${nakedDomain}` } } }, { minio: { apiFqdn: { endsWith: `//www.${nakedDomain}` } } } ], - id: { not: checkOwn ? undefined : id } + id: { not: checkOwn ? 
undefined : id }, + destinationDocker: { + remoteIpAddress + } }, select: { fqdn: true } }); @@ -359,12 +376,9 @@ export async function isDomainConfigured({ return !!(foundApp || foundService || coolifyFqdn); } -export async function getContainerUsage(engine: string, container: string): Promise { - const host = getEngine(engine); +export async function getContainerUsage(dockerId: string, container: string): Promise { try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST="${host}" docker container stats ${container} --no-stream --no-trunc --format "{{json .}}"` - ); + const { stdout } = await executeDockerCmd({ dockerId, command: `docker container stats ${container} --no-stream --no-trunc --format "{{json .}}"` }) return JSON.parse(stdout); } catch (err) { return { @@ -453,7 +467,7 @@ export const supportedDatabaseTypesAndVersions = [ name: 'mariadb', fancyName: 'MariaDB', baseImage: 'bitnami/mariadb', - versions: ['10.7', '10.6', '10.5', '10.4', '10.3', '10.2'] + versions: ['10.8', '10.7', '10.6', '10.5', '10.4', '10.3', '10.2'] }, { name: 'postgresql', @@ -465,24 +479,106 @@ export const supportedDatabaseTypesAndVersions = [ name: 'redis', fancyName: 'Redis', baseImage: 'bitnami/redis', - versions: ['6.2', '6.0', '5.0'] + versions: ['7.0', '6.2', '6.0', '5.0'] }, - { name: 'couchdb', fancyName: 'CouchDB', baseImage: 'bitnami/couchdb', versions: ['3.2.1'] } + { name: 'couchdb', fancyName: 'CouchDB', baseImage: 'bitnami/couchdb', versions: ['3.2.2'] } ]; -export async function startTraefikProxy(engine: string): Promise { - const host = getEngine(engine); - const found = await checkContainer(engine, 'coolify-proxy', true); - const { proxyPassword, proxyUser, id } = await listSettings(); +export async function getFreeSSHLocalPort(id: string): Promise { + const { default: getPort, portNumbers } = await import('get-port'); + const { remoteIpAddress, sshLocalPort } = await prisma.destinationDocker.findUnique({ where: { id } }) + if (sshLocalPort) { + return 
Number(sshLocalPort) + } + const ports = await prisma.destinationDocker.findMany({ where: { sshLocalPort: { not: null }, remoteIpAddress: { not: remoteIpAddress } } }) + const alreadyConfigured = await prisma.destinationDocker.findFirst({ where: { remoteIpAddress, id: { not: id }, sshLocalPort: { not: null } } }) + if (alreadyConfigured?.sshLocalPort) { + await prisma.destinationDocker.update({ where: { id }, data: { sshLocalPort: alreadyConfigured.sshLocalPort } }) + return Number(alreadyConfigured.sshLocalPort) + } + const availablePort = await getPort({ port: portNumbers(10000, 10100), exclude: ports.map(p => p.sshLocalPort) }) + await prisma.destinationDocker.update({ where: { id }, data: { sshLocalPort: Number(availablePort) } }) + return Number(availablePort) +} + +export async function createRemoteEngineConfiguration(id: string) { + const homedir = os.homedir(); + const sshKeyFile = `/tmp/id_rsa-${id}` + const localPort = await getFreeSSHLocalPort(id); + const { sshKey: { privateKey }, remoteIpAddress, remotePort, remoteUser } = await prisma.destinationDocker.findFirst({ where: { id }, include: { sshKey: true } }) + await fs.writeFile(sshKeyFile, decrypt(privateKey) + '\n', { encoding: 'utf8', mode: 400 }) + // Needed for remote docker compose + const { stdout: numberOfSSHAgentsRunning } = await asyncExecShell(`ps ax | grep [s]sh-agent | grep ssh-agent.pid | grep -v grep | wc -l`) + if (numberOfSSHAgentsRunning !== '' && Number(numberOfSSHAgentsRunning.trim()) == 0) { + await asyncExecShell(`eval $(ssh-agent -sa /tmp/ssh-agent.pid)`) + } + await asyncExecShell(`SSH_AUTH_SOCK=/tmp/ssh-agent.pid ssh-add -q ${sshKeyFile}`) + + const { stdout: numberOfSSHTunnelsRunning } = await asyncExecShell(`ps ax | grep 'ssh -F /dev/null -o StrictHostKeyChecking no -fNL ${localPort}:localhost:${remotePort}' | grep -v grep | wc -l`) + if (numberOfSSHTunnelsRunning !== '' && Number(numberOfSSHTunnelsRunning.trim()) == 0) { + try { + await 
asyncExecShell(`SSH_AUTH_SOCK=/tmp/ssh-agent.pid ssh -F /dev/null -o "StrictHostKeyChecking no" -fNL ${localPort}:localhost:${remotePort} ${remoteUser}@${remoteIpAddress}`) + + } catch (error) { + console.log(error) + } + + } + const config = sshConfig.parse('') + const found = config.find({ Host: remoteIpAddress }) if (!found) { - const { stdout: Config } = await asyncExecShell( - `DOCKER_HOST="${host}" docker network inspect bridge --format '{{json .IPAM.Config }}'` - ); + config.append({ + Host: remoteIpAddress, + Hostname: 'localhost', + Port: Number(localPort), + User: remoteUser, + IdentityFile: sshKeyFile, + StrictHostKeyChecking: 'no' + }) + } + try { + await fs.stat(`${homedir}/.ssh/`) + } catch (error) { + await fs.mkdir(`${homedir}/.ssh/`) + } + return await fs.writeFile(`${homedir}/.ssh/config`, sshConfig.stringify(config)) +} +export async function executeDockerCmd({ dockerId, command }: { dockerId: string, command: string }) { + let { remoteEngine, remoteIpAddress, engine } = await prisma.destinationDocker.findUnique({ where: { id: dockerId } }) + if (remoteEngine) { + await createRemoteEngineConfiguration(dockerId) + engine = `ssh://${remoteIpAddress}` + } else { + engine = 'unix:///var/run/docker.sock' + } + return await asyncExecShell( + `DOCKER_BUILDKIT=1 DOCKER_HOST="${engine}" ${command}` + ); + +} +export async function startTraefikProxy(id: string): Promise { + const { engine, network, remoteEngine, remoteIpAddress } = await prisma.destinationDocker.findUnique({ where: { id } }) + const found = await checkContainer({ dockerId: id, container: 'coolify-proxy', remove: true }); + const { id: settingsId, ipv4, ipv6 } = await listSettings(); + + if (!found) { + const { stdout: Config } = await executeDockerCmd({ dockerId: id, command: `docker network inspect ${network} --format '{{json .IPAM.Config }}'` }) const ip = JSON.parse(Config)[0].Gateway; - await asyncExecShell( - `DOCKER_HOST="${host}" docker run --restart always \ + let traefikUrl = 
mainTraefikEndpoint + if (remoteEngine) { + let ip = null + if (isDev) { + ip = getAPIUrl() + } else { + ip = `http://${ipv4 || ipv6}:3000` + } + traefikUrl = `${ip}/webhooks/traefik/remote/${id}` + } + await executeDockerCmd({ + dockerId: id, + command: `docker run --restart always \ --add-host 'host.docker.internal:host-gateway' \ - --add-host 'host.docker.internal:${ip}' \ + ${ip ? `--add-host 'host.docker.internal:${ip}'` : ''} \ -v coolify-traefik-letsencrypt:/etc/traefik/acme \ -v /var/run/docker.sock:/var/run/docker.sock \ --network coolify-infra \ @@ -496,96 +592,72 @@ export async function startTraefikProxy(engine: string): Promise { --entrypoints.websecure.forwardedHeaders.insecure=true \ --providers.docker=true \ --providers.docker.exposedbydefault=false \ - --providers.http.endpoint=${mainTraefikEndpoint} \ + --providers.http.endpoint=${traefikUrl} \ --providers.http.pollTimeout=5s \ --certificatesresolvers.letsencrypt.acme.httpchallenge=true \ --certificatesresolvers.letsencrypt.acme.storage=/etc/traefik/acme/acme.json \ --certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web \ --log.level=error` - ); - await prisma.setting.update({ where: { id }, data: { proxyHash: null } }); - await prisma.destinationDocker.updateMany({ - where: { engine }, + }) + await prisma.setting.update({ where: { id: settingsId }, data: { proxyHash: null } }); + await prisma.destinationDocker.update({ + where: { id }, data: { isCoolifyProxyUsed: true } }); } - await configureNetworkTraefikProxy(engine); -} - -export async function configureNetworkTraefikProxy(engine: string): Promise { - const host = getEngine(engine); - const destinations = await prisma.destinationDocker.findMany({ where: { engine } }); - const { stdout: networks } = await asyncExecShell( - `DOCKER_HOST="${host}" docker ps -a --filter name=coolify-proxy --format '{{json .Networks}}'` - ); - const configuredNetworks = networks.replace(/"/g, '').replace('\n', '').split(','); - for (const 
destination of destinations) { - if (!configuredNetworks.includes(destination.network)) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker network connect ${destination.network} coolify-proxy` - ); + // Configure networks for local docker engine + if (engine) { + const destinations = await prisma.destinationDocker.findMany({ where: { engine } }); + for (const destination of destinations) { + await configureNetworkTraefikProxy(destination); + } + } + // Configure networks for remote docker engine + if (remoteEngine) { + const destinations = await prisma.destinationDocker.findMany({ where: { remoteIpAddress } }); + for (const destination of destinations) { + await configureNetworkTraefikProxy(destination); } } } +export async function configureNetworkTraefikProxy(destination: any): Promise { + const { id } = destination + const { stdout: networks } = await executeDockerCmd({ + dockerId: id, + command: + `docker ps -a --filter name=coolify-proxy --format '{{json .Networks}}'` + }); + const configuredNetworks = networks.replace(/"/g, '').replace('\n', '').split(','); + if (!configuredNetworks.includes(destination.network)) { + await executeDockerCmd({ dockerId: destination.id, command: `docker network connect ${destination.network} coolify-proxy` }) + } +} + export async function stopTraefikProxy( - engine: string + id: string ): Promise<{ stdout: string; stderr: string } | Error> { - const host = getEngine(engine); - const found = await checkContainer(engine, 'coolify-proxy'); - await prisma.destinationDocker.updateMany({ - where: { engine }, + const found = await checkContainer({ dockerId: id, container: 'coolify-proxy' }); + await prisma.destinationDocker.update({ + where: { id }, data: { isCoolifyProxyUsed: false } }); - const { id } = await prisma.setting.findFirst({}); - await prisma.setting.update({ where: { id }, data: { proxyHash: null } }); + const { id: settingsId } = await prisma.setting.findFirst({}); + await prisma.setting.update({ where: { id: 
settingsId }, data: { proxyHash: null } }); try { if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker stop -t 0 coolify-proxy && docker rm coolify-proxy` - ); + await executeDockerCmd({ + dockerId: id, + command: + `docker stop -t 0 coolify-proxy && docker rm coolify-proxy` + }); + } } catch (error) { return error; } } -export async function startCoolifyProxy(engine: string): Promise { - const host = getEngine(engine); - const found = await checkContainer(engine, 'coolify-haproxy', true); - const { proxyPassword, proxyUser, id } = await listSettings(); - if (!found) { - const { stdout: Config } = await asyncExecShell( - `DOCKER_HOST="${host}" docker network inspect bridge --format '{{json .IPAM.Config }}'` - ); - const ip = JSON.parse(Config)[0].Gateway; - await asyncExecShell( - `DOCKER_HOST="${host}" docker run -e HAPROXY_USERNAME=${proxyUser} -e HAPROXY_PASSWORD=${proxyPassword} --restart always --add-host 'host.docker.internal:host-gateway' --add-host 'host.docker.internal:${ip}' -v coolify-ssl-certs:/usr/local/etc/haproxy/ssl --network coolify-infra -p "80:80" -p "443:443" -p "8404:8404" -p "5555:5555" -p "5000:5000" --name coolify-haproxy -d coollabsio/${defaultProxyImage}` - ); - await prisma.setting.update({ where: { id }, data: { proxyHash: null } }); - await prisma.destinationDocker.updateMany({ - where: { engine }, - data: { isCoolifyProxyUsed: true } - }); - } - await configureNetworkCoolifyProxy(engine); -} - -export async function configureNetworkCoolifyProxy(engine: string): Promise { - const host = getEngine(engine); - const destinations = await prisma.destinationDocker.findMany({ where: { engine } }); - const { stdout: networks } = await asyncExecShell( - `DOCKER_HOST="${host}" docker ps -a --filter name=coolify-haproxy --format '{{json .Networks}}'` - ); - const configuredNetworks = networks.replace(/"/g, '').replace('\n', '').split(','); - for (const destination of destinations) { - if 
(!configuredNetworks.includes(destination.network)) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker network connect ${destination.network} coolify-haproxy` - ); - } - } -} export async function listSettings(): Promise { const settings = await prisma.setting.findFirst({}); if (settings.proxyPassword) settings.proxyPassword = decrypt(settings.proxyPassword); @@ -593,29 +665,6 @@ export async function listSettings(): Promise { } - -// export async function stopCoolifyProxy( -// engine: string -// ): Promise<{ stdout: string; stderr: string } | Error> { -// const host = getEngine(engine); -// const found = await checkContainer(engine, 'coolify-haproxy'); -// await prisma.destinationDocker.updateMany({ -// where: { engine }, -// data: { isCoolifyProxyUsed: false } -// }); -// const { id } = await prisma.setting.findFirst({}); -// await prisma.setting.update({ where: { id }, data: { proxyHash: null } }); -// try { -// if (found) { -// await asyncExecShell( -// `DOCKER_HOST="${host}" docker stop -t 0 coolify-haproxy && docker rm coolify-haproxy` -// ); -// } -// } catch (error) { -// return error; -// } -// } - export function generatePassword(length = 24, symbols = false): string { return generator.generate({ length, @@ -900,38 +949,6 @@ export const createDirectories = async ({ }; }; -export async function startTcpProxy( - destinationDocker: any, - id: string, - publicPort: number, - privatePort: number -): Promise<{ stdout: string; stderr: string } | Error> { - const { network, engine } = destinationDocker; - const host = getEngine(engine); - - const containerName = `haproxy-for-${publicPort}`; - const found = await checkContainer(engine, containerName, true); - const foundDependentContainer = await checkContainer(engine, id, true); - try { - if (foundDependentContainer && !found) { - const { stdout: Config } = await asyncExecShell( - `DOCKER_HOST="${host}" docker network inspect bridge --format '{{json .IPAM.Config }}'` - ); - const ip = 
JSON.parse(Config)[0].Gateway; - return await asyncExecShell( - `DOCKER_HOST=${host} docker run --restart always -e PORT=${publicPort} -e APP=${id} -e PRIVATE_PORT=${privatePort} --add-host 'host.docker.internal:host-gateway' --add-host 'host.docker.internal:${ip}' --network ${network} -p ${publicPort}:${publicPort} --name ${containerName} -d coollabsio/${defaultProxyImageTcp}` - ); - } - if (!foundDependentContainer && found) { - return await asyncExecShell( - `DOCKER_HOST=${host} docker stop -t 0 ${containerName} && docker rm ${containerName}` - ); - } - } catch (error) { - return error; - } -} - export async function stopDatabaseContainer( database: any @@ -940,17 +957,15 @@ export async function stopDatabaseContainer( const { id, destinationDockerId, - destinationDocker: { engine } + destinationDocker: { engine, id: dockerId } } = database; if (destinationDockerId) { try { - const host = getEngine(engine); - const { stdout } = await asyncExecShell( - `DOCKER_HOST=${host} docker inspect --format '{{json .State}}' ${id}` - ); + const { stdout } = await executeDockerCmd({ dockerId, command: `docker inspect --format '{{json .State}}' ${id}` }) + if (stdout) { everStarted = true; - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId }); } } catch (error) { // @@ -966,21 +981,18 @@ export async function stopTcpHttpProxy( publicPort: number, forceName: string = null ): Promise<{ stdout: string; stderr: string } | Error> { - const { engine } = destinationDocker; - const host = getEngine(engine); - const settings = await listSettings(); - let containerName = `${id}-${publicPort}`; - if (!settings.isTraefikUsed) { - containerName = `haproxy-for-${publicPort}`; - } - if (forceName) containerName = forceName; - const found = await checkContainer(engine, containerName); - + const { id: dockerId } = destinationDocker; + let container = `${id}-${publicPort}`; + if (forceName) container = forceName; + const found = await checkContainer({ dockerId, 
container }); try { if (found) { - return await asyncExecShell( - `DOCKER_HOST=${host} docker stop -t 0 ${containerName} && docker rm ${containerName}` - ); + return await executeDockerCmd({ + dockerId, + command: + `docker stop -t 0 ${container} && docker rm ${container}` + }); + } } catch (error) { return error; @@ -998,66 +1010,99 @@ export async function updatePasswordInDb(database, user, newPassword, isRoot) { dbUserPassword, defaultDatabase, destinationDockerId, - destinationDocker: { engine } + destinationDocker: { id: dockerId } } = database; if (destinationDockerId) { - const host = getEngine(engine); if (type === 'mysql') { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} mysql -u ${rootUser} -p${rootUserPassword} -e \"ALTER USER '${user}'@'%' IDENTIFIED WITH caching_sha2_password BY '${newPassword}';\"` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} mysql -u ${rootUser} -p${rootUserPassword} -e \"ALTER USER '${user}'@'%' IDENTIFIED WITH caching_sha2_password BY '${newPassword}';\"` + }) } else if (type === 'mariadb') { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} mysql -u ${rootUser} -p${rootUserPassword} -e \"SET PASSWORD FOR '${user}'@'%' = PASSWORD('${newPassword}');\"` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} mysql -u ${rootUser} -p${rootUserPassword} -e \"SET PASSWORD FOR '${user}'@'%' = PASSWORD('${newPassword}');\"` + }) + } else if (type === 'postgresql') { if (isRoot) { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} psql postgresql://postgres:${rootUserPassword}@${id}:5432/${defaultDatabase} -c "ALTER role postgres WITH PASSWORD '${newPassword}'"` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} psql postgresql://postgres:${rootUserPassword}@${id}:5432/${defaultDatabase} -c "ALTER role postgres WITH PASSWORD '${newPassword}'"` + }) } else { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} psql 
postgresql://${dbUser}:${dbUserPassword}@${id}:5432/${defaultDatabase} -c "ALTER role ${user} WITH PASSWORD '${newPassword}'"` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} psql postgresql://${dbUser}:${dbUserPassword}@${id}:5432/${defaultDatabase} -c "ALTER role ${user} WITH PASSWORD '${newPassword}'"` + }) } } else if (type === 'mongodb') { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} mongo 'mongodb://${rootUser}:${rootUserPassword}@${id}:27017/admin?readPreference=primary&ssl=false' --eval "db.changeUserPassword('${user}','${newPassword}')"` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} mongo 'mongodb://${rootUser}:${rootUserPassword}@${id}:27017/admin?readPreference=primary&ssl=false' --eval "db.changeUserPassword('${user}','${newPassword}')"` + }) + } else if (type === 'redis') { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec ${id} redis-cli -u redis://${dbUserPassword}@${id}:6379 --raw CONFIG SET requirepass ${newPassword}` - ); + await executeDockerCmd({ + dockerId, + command: `docker exec ${id} redis-cli -u redis://${dbUserPassword}@${id}:6379 --raw CONFIG SET requirepass ${newPassword}` + }) + } } } +export async function getFreeExposedPort(id, exposePort, dockerId, remoteIpAddress) { + const { default: getPort } = await import('get-port'); + const applicationUsed = await ( + await prisma.application.findMany({ + where: { exposePort: { not: null }, id: { not: id }, destinationDockerId: dockerId }, + select: { exposePort: true } + }) + ).map((a) => a.exposePort); + const serviceUsed = await ( + await prisma.service.findMany({ + where: { exposePort: { not: null }, id: { not: id }, destinationDockerId: dockerId }, + select: { exposePort: true } + }) + ).map((a) => a.exposePort); + const usedPorts = [...applicationUsed, ...serviceUsed]; + if (remoteIpAddress) { + const { default: checkPort } = await import('is-port-reachable'); + const found = await checkPort(exposePort, { 
host: remoteIpAddress }); + if (!found) { + return exposePort + } + return false + } + return await getPort({ port: Number(exposePort), exclude: usedPorts }); -export async function getFreePort() { +} +export async function getFreePublicPort(id, dockerId) { const { default: getPort, portNumbers } = await import('get-port'); const data = await prisma.setting.findFirst(); const { minPort, maxPort } = data; const dbUsed = await ( await prisma.database.findMany({ - where: { publicPort: { not: null } }, + where: { publicPort: { not: null }, id: { not: id }, destinationDockerId: dockerId }, select: { publicPort: true } }) ).map((a) => a.publicPort); const wpFtpUsed = await ( await prisma.wordpress.findMany({ - where: { ftpPublicPort: { not: null } }, + where: { ftpPublicPort: { not: null }, id: { not: id }, service: { destinationDockerId: dockerId } }, select: { ftpPublicPort: true } }) ).map((a) => a.ftpPublicPort); const wpUsed = await ( await prisma.wordpress.findMany({ - where: { mysqlPublicPort: { not: null } }, + where: { mysqlPublicPort: { not: null }, id: { not: id }, service: { destinationDockerId: dockerId } }, select: { mysqlPublicPort: true } }) ).map((a) => a.mysqlPublicPort); const minioUsed = await ( await prisma.minio.findMany({ - where: { publicPort: { not: null } }, + where: { publicPort: { not: null }, id: { not: id }, service: { destinationDockerId: dockerId } }, select: { publicPort: true } }) ).map((a) => a.publicPort); @@ -1065,7 +1110,6 @@ export async function getFreePort() { return await getPort({ port: portNumbers(minPort, maxPort), exclude: usedPorts }); } - export async function startTraefikTCPProxy( destinationDocker: any, id: string, @@ -1073,34 +1117,48 @@ export async function startTraefikTCPProxy( privatePort: number, type?: string ): Promise<{ stdout: string; stderr: string } | Error> { - const { network, engine } = destinationDocker; - const host = getEngine(engine); - const containerName = `${id}-${publicPort}`; - const found = await 
checkContainer(engine, containerName, true); + const { network, id: dockerId, remoteEngine } = destinationDocker; + const container = `${id}-${publicPort}`; + const found = await checkContainer({ dockerId, container, remove: true }); + const { ipv4, ipv6 } = await listSettings(); + let dependentId = id; if (type === 'wordpressftp') dependentId = `${id}-ftp`; - const foundDependentContainer = await checkContainer(engine, dependentId, true); + const foundDependentContainer = await checkContainer({ dockerId, container: dependentId, remove: true }); try { if (foundDependentContainer && !found) { - const { stdout: Config } = await asyncExecShell( - `DOCKER_HOST="${host}" docker network inspect bridge --format '{{json .IPAM.Config }}'` - ); + const { stdout: Config } = await executeDockerCmd({ + dockerId, + command: `docker network inspect ${network} --format '{{json .IPAM.Config }}'` + }) + const ip = JSON.parse(Config)[0].Gateway; + let traefikUrl = otherTraefikEndpoint + if (remoteEngine) { + let ip = null + if (isDev) { + ip = getAPIUrl() + } else { + ip = `http://${ipv4 || ipv6}:3000` + } + traefikUrl = `${ip}/webhooks/traefik/other.json` + } + console.log(traefikUrl) const tcpProxy = { - version: '3.5', + version: '3.8', services: { [`${id}-${publicPort}`]: { - container_name: containerName, - image: 'traefik:v2.6', + container_name: container, + image: defaultTraefikImage, command: [ `--entrypoints.tcp.address=:${publicPort}`, `--entryPoints.tcp.forwardedHeaders.insecure=true`, - `--providers.http.endpoint=${otherTraefikEndpoint}?id=${id}&privatePort=${privatePort}&publicPort=${publicPort}&type=tcp&address=${dependentId}`, + `--providers.http.endpoint=${traefikUrl}?id=${id}&privatePort=${privatePort}&publicPort=${publicPort}&type=tcp&address=${dependentId}`, '--providers.http.pollTimeout=2s', '--log.level=error' ], ports: [`${publicPort}:${publicPort}`], - extra_hosts: ['host.docker.internal:host-gateway', `host.docker.internal:${ip}`], + extra_hosts: 
['host.docker.internal:host-gateway', `host.docker.internal: ${ip}`], volumes: ['/var/run/docker.sock:/var/run/docker.sock'], networks: ['coolify-infra', network] } @@ -1117,15 +1175,17 @@ export async function startTraefikTCPProxy( } }; await fs.writeFile(`/tmp/docker-compose-${id}.yaml`, yaml.dump(tcpProxy)); - await asyncExecShell( - `DOCKER_HOST=${host} docker compose -f /tmp/docker-compose-${id}.yaml up -d` - ); + await executeDockerCmd({ + dockerId, + command: `docker compose -f /tmp/docker-compose-${id}.yaml up -d` + }) await fs.rm(`/tmp/docker-compose-${id}.yaml`); } if (!foundDependentContainer && found) { - return await asyncExecShell( - `DOCKER_HOST=${host} docker stop -t 0 ${containerName} && docker rm ${containerName}` - ); + await executeDockerCmd({ + dockerId, + command: `docker stop -t 0 ${container} && docker rm ${container}` + }) } } catch (error) { console.log(error); @@ -1350,7 +1410,7 @@ export async function configureServiceType({ } else if (type === 'moodle') { const defaultUsername = cuid(); const defaultPassword = encrypt(generatePassword()); - const defaultEmail = `${cuid()}@example.com`; + const defaultEmail = `${cuid()} @example.com`; const mariadbUser = cuid(); const mariadbPassword = encrypt(generatePassword()); const mariadbDatabase = 'moodle_db'; @@ -1401,7 +1461,7 @@ export async function removeService({ id }: { id: string }): Promise { } export function saveUpdateableFields(type: string, data: any) { - let update = {}; + const update = {}; if (type && serviceFields[type]) { serviceFields[type].map((k) => { let temp = data[k.name] @@ -1426,7 +1486,7 @@ export function saveUpdateableFields(type: string, data: any) { } export function getUpdateableFields(type: string, data: any) { - let update = {}; + const update = {}; if (type && serviceFields[type]) { serviceFields[type].map((k) => { let temp = data[k.name] @@ -1461,9 +1521,9 @@ export const getServiceMainPort = (service: string) => { export function makeLabelForServices(type) { 
return [ 'coolify.managed=true', - `coolify.version=${version}`, - `coolify.type=service`, - `coolify.service.type=${type}` + `coolify.version = ${version} `, + `coolify.type = service`, + `coolify.service.type = ${type} ` ]; } export function errorHandler({ status = 500, message = 'Unknown error.' }: { status: number, message: string | any }) { @@ -1489,9 +1549,8 @@ export async function stopBuild(buildId, applicationId) { let count = 0; await new Promise(async (resolve, reject) => { const { destinationDockerId, status } = await prisma.build.findFirst({ where: { id: buildId } }); - const { engine } = await prisma.destinationDocker.findFirst({ where: { id: destinationDockerId } }); - const host = getEngine(engine); - let interval = setInterval(async () => { + const { engine, id: dockerId } = await prisma.destinationDocker.findFirst({ where: { id: destinationDockerId } }); + const interval = setInterval(async () => { try { if (status === 'failed') { clearInterval(interval); @@ -1501,17 +1560,14 @@ export async function stopBuild(buildId, applicationId) { clearInterval(interval); return reject(new Error('Build canceled')); } - - const { stdout: buildContainers } = await asyncExecShell( - `DOCKER_HOST=${host} docker container ls --filter "label=coolify.buildId=${buildId}" --format '{{json .}}'` - ); + const { stdout: buildContainers } = await executeDockerCmd({ dockerId, command: `docker container ls--filter "label=coolify.buildId=${buildId}" --format '{{json .}}'` }) if (buildContainers) { const containersArray = buildContainers.trim().split('\n'); for (const container of containersArray) { const containerObj = JSON.parse(container); const id = containerObj.ID; - if (!containerObj.Names.startsWith(`${applicationId}`)) { - await removeContainer({ id, engine }); + if (!containerObj.Names.startsWith(`${applicationId} `)) { + await removeContainer({ id, dockerId }); await cleanupDB(buildId); clearInterval(interval); return resolve(); @@ -1539,16 +1595,15 @@ export 
function convertTolOldVolumeNames(type) { // export async function getAvailableServices(): Promise { // const { data } = await axios.get(`https://gist.githubusercontent.com/andrasbacsai/4aac36d8d6214dbfc34fa78110554a50/raw/5b27e6c37d78aaeedc1148d797112c827a2f43cf/availableServices.json`) // return data -// } -export async function cleanupDockerStorage(host, lowDiskSpace, force) { +// +export async function cleanupDockerStorage(dockerId, lowDiskSpace, force) { // Cleanup old coolify images try { - let { stdout: images } = await asyncExecShell( - `DOCKER_HOST=${host} docker images coollabsio/coolify --filter before="coollabsio/coolify:${version}" -q | xargs ` - ); + let { stdout: images } = await executeDockerCmd({ dockerId, command: `docker images coollabsio/coolify --filter before="coollabsio/coolify:${version}" -q | xargs` }) + images = images.trim(); if (images) { - await asyncExecShell(`DOCKER_HOST=${host} docker rmi -f ${images}`); + await executeDockerCmd({ dockerId, command: `docker rmi -f ${images}" -q | xargs` }) } } catch (error) { //console.log(error); @@ -1559,19 +1614,48 @@ export async function cleanupDockerStorage(host, lowDiskSpace, force) { return } try { - await asyncExecShell(`DOCKER_HOST=${host} docker container prune -f`); + await executeDockerCmd({ dockerId, command: `docker container prune -f` }) } catch (error) { //console.log(error); } try { - await asyncExecShell(`DOCKER_HOST=${host} docker image prune -f`); + await executeDockerCmd({ dockerId, command: `docker image prune -f` }) } catch (error) { //console.log(error); } try { - await asyncExecShell(`DOCKER_HOST=${host} docker image prune -a -f`); + await executeDockerCmd({ dockerId, command: `docker image prune -a -f` }) } catch (error) { //console.log(error); } } +} + +export function persistentVolumes(id, persistentStorage, config) { + const persistentVolume = + persistentStorage?.map((storage) => { + return `${id}${storage.path.replace(/\//gi, '-')}:${storage.path}`; + }) || []; + + let 
volumes = [ ...persistentVolume] + if (config.volume) volumes = [config.volume, ...volumes] + + const composeVolumes = volumes.length > 0 && volumes.map((volume) => { + return { + [`${volume.split(':')[0]}`]: { + name: volume.split(':')[0] + } + }; + }) || [] + + const volumeMounts = config.volume && Object.assign( + {}, + { + [config.volume.split(':')[0]]: { + name: config.volume.split(':')[0] + } + }, + ...composeVolumes + ) || {} + return { volumes, volumeMounts } } \ No newline at end of file diff --git a/apps/api/src/lib/docker.ts b/apps/api/src/lib/docker.ts index 730666590..db2dbc3e7 100644 --- a/apps/api/src/lib/docker.ts +++ b/apps/api/src/lib/docker.ts @@ -1,33 +1,43 @@ -import { asyncExecShell } from './common'; -import Dockerode from 'dockerode'; -export function getEngine(engine: string): string { - return engine === '/var/run/docker.sock' ? 'unix:///var/run/docker.sock' : engine; -} -export function dockerInstance({ destinationDocker }): { engine: Dockerode; network: string } { - return { - engine: new Dockerode({ - socketPath: destinationDocker.engine - }), - network: destinationDocker.network - }; -} +import { executeDockerCmd } from './common'; -export async function checkContainer(engine: string, container: string, remove = false): Promise { - const host = getEngine(engine); +export function formatLabelsOnDocker(data) { + return data.trim().split('\n').map(a => JSON.parse(a)).map((container) => { + const labels = container.Labels.split(',') + let jsonLabels = {} + labels.forEach(l => { + const name = l.split('=')[0] + const value = l.split('=')[1] + jsonLabels = { ...jsonLabels, ...{ [name]: value } } + }) + container.Labels = jsonLabels; + return container + }) +} +export async function checkContainer({ dockerId, container, remove = false }: { dockerId: string, container: string, remove?: boolean }): Promise { let containerFound = false; - try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST="${host}" docker inspect --format '{{json 
.State}}' ${container}` - ); + const { stdout } = await executeDockerCmd({ + dockerId, + command: + `docker inspect --format '{{json .State}}' ${container}` + }); + const parsedStdout = JSON.parse(stdout); const status = parsedStdout.Status; const isRunning = status === 'running'; if (status === 'created') { - await asyncExecShell(`DOCKER_HOST="${host}" docker rm ${container}`); + await executeDockerCmd({ + dockerId, + command: + `docker rm ${container}` + }); } if (remove && status === 'exited') { - await asyncExecShell(`DOCKER_HOST="${host}" docker rm ${container}`); + await executeDockerCmd({ + dockerId, + command: + `docker rm ${container}` + }); } if (isRunning) { containerFound = true; @@ -38,13 +48,10 @@ export async function checkContainer(engine: string, container: string, remove = return containerFound; } -export async function isContainerExited(engine: string, containerName: string): Promise { +export async function isContainerExited(dockerId: string, containerName: string): Promise { let isExited = false; - const host = getEngine(engine); try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST="${host}" docker inspect -f '{{.State.Status}}' ${containerName}` - ); + const { stdout } = await executeDockerCmd({ dockerId, command: `docker inspect -f '{{.State.Status}}' ${containerName}` }) if (stdout.trim() === 'exited') { isExited = true; } @@ -57,19 +64,17 @@ export async function isContainerExited(engine: string, containerName: string): export async function removeContainer({ id, - engine + dockerId }: { id: string; - engine: string; + dockerId: string; }): Promise { - const host = getEngine(engine); try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST=${host} docker inspect --format '{{json .State}}' ${id}` - ); + const { stdout } = await executeDockerCmd({ dockerId, command: `docker inspect --format '{{json .State}}' ${id}` }) + if (JSON.parse(stdout).Running) { - await asyncExecShell(`DOCKER_HOST=${host} docker stop -t 0 ${id}`); - 
await asyncExecShell(`DOCKER_HOST=${host} docker rm ${id}`); + await executeDockerCmd({ dockerId, command: `docker stop -t 0 ${id}` }) + await executeDockerCmd({ dockerId, command: `docker rm ${id}` }) } } catch (error) { console.log(error); diff --git a/apps/api/src/lib/serviceFields.ts b/apps/api/src/lib/serviceFields.ts index 7a6b00a85..89d6a9c70 100644 --- a/apps/api/src/lib/serviceFields.ts +++ b/apps/api/src/lib/serviceFields.ts @@ -344,7 +344,7 @@ export const fider = [{ { name: 'emailNoreply', isEditable: true, - isLowerCase: true, + isLowerCase: false, isNumber: false, isBoolean: false, isEncrypted: false @@ -352,7 +352,7 @@ export const fider = [{ { name: 'emailSmtpHost', isEditable: true, - isLowerCase: true, + isLowerCase: false, isNumber: false, isBoolean: false, isEncrypted: false @@ -376,7 +376,7 @@ export const fider = [{ { name: 'emailSmtpUser', isEditable: true, - isLowerCase: true, + isLowerCase: false, isNumber: false, isBoolean: false, isEncrypted: false diff --git a/apps/api/src/routes/api/v1/applications/handlers.ts b/apps/api/src/routes/api/v1/applications/handlers.ts index 5cb32db22..b3ef49133 100644 --- a/apps/api/src/routes/api/v1/applications/handlers.ts +++ b/apps/api/src/routes/api/v1/applications/handlers.ts @@ -5,12 +5,12 @@ import axios from 'axios'; import { FastifyReply } from 'fastify'; import { day } from '../../../../lib/dayjs'; import { setDefaultBaseImage, setDefaultConfiguration } from '../../../../lib/buildPacks/common'; -import { asyncExecShell, checkDomainsIsValidInDNS, checkDoubleBranch, decrypt, encrypt, errorHandler, generateSshKeyPair, getContainerUsage, getDomain, isDev, isDomainConfigured, prisma, stopBuild, uniqueName } from '../../../../lib/common'; -import { checkContainer, dockerInstance, getEngine, isContainerExited, removeContainer } from '../../../../lib/docker'; +import { checkDomainsIsValidInDNS, checkDoubleBranch, decrypt, encrypt, errorHandler, executeDockerCmd, generateSshKeyPair, getContainerUsage, 
getDomain, getFreeExposedPort, isDev, isDomainConfigured, prisma, stopBuild, uniqueName } from '../../../../lib/common'; +import { checkContainer, formatLabelsOnDocker, isContainerExited, removeContainer } from '../../../../lib/docker'; import { scheduler } from '../../../../lib/scheduler'; import type { FastifyRequest } from 'fastify'; -import type { GetImages, CancelDeployment, CheckDNS, CheckRepository, DeleteApplication, DeleteSecret, DeleteStorage, GetApplicationLogs, GetBuildIdLogs, GetBuildLogs, SaveApplication, SaveApplicationSettings, SaveApplicationSource, SaveDeployKey, SaveDestination, SaveSecret, SaveStorage, DeployApplication } from './types'; +import type { GetImages, CancelDeployment, CheckDNS, CheckRepository, DeleteApplication, DeleteSecret, DeleteStorage, GetApplicationLogs, GetBuildIdLogs, GetBuildLogs, SaveApplication, SaveApplicationSettings, SaveApplicationSource, SaveDeployKey, SaveDestination, SaveSecret, SaveStorage, DeployApplication, CheckDomain, StopPreviewApplication } from './types'; import { OnlyId } from '../../../../types'; export async function listApplications(request: FastifyRequest) { @@ -18,7 +18,7 @@ export async function listApplications(request: FastifyRequest) { const { teamId } = request.user const applications = await prisma.application.findMany({ where: { teams: { some: { id: teamId === '0' ? 
undefined : teamId } } }, - include: { teams: true } + include: { teams: true, destinationDocker: true } }); const settings = await prisma.setting.findFirst() return { @@ -57,7 +57,28 @@ export async function getImages(request: FastifyRequest) { } - return { baseImage, baseBuildImage, baseBuildImages, baseImages, publishDirectory, port } + return { baseBuildImage, baseBuildImages, publishDirectory, port } + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} +export async function getApplicationStatus(request: FastifyRequest) { + try { + const { id } = request.params + const { teamId } = request.user + let isRunning = false; + let isExited = false; + + const application: any = await getApplicationFromDB(id, teamId); + if (application?.destinationDockerId) { + isRunning = await checkContainer({ dockerId: application.destinationDocker.id, container: id }); + isExited = await isContainerExited(application.destinationDocker.id, id); + } + return { + isQueueActive: scheduler.workers.has('deployApplication'), + isRunning, + isExited, + }; } catch ({ status, message }) { return errorHandler({ status, message }) } @@ -68,17 +89,9 @@ export async function getApplication(request: FastifyRequest) { const { id } = request.params const { teamId } = request.user const appId = process.env['COOLIFY_APP_ID']; - let isRunning = false; - let isExited = false; const application: any = await getApplicationFromDB(id, teamId); - if (application?.destinationDockerId && application.destinationDocker?.engine) { - isRunning = await checkContainer(application.destinationDocker.engine, id); - isExited = await isContainerExited(application.destinationDocker.engine, id); - } + return { - isQueueActive: scheduler.workers.has('deployApplication'), - isRunning, - isExited, application, appId }; @@ -279,16 +292,35 @@ export async function saveApplicationSettings(request: FastifyRequest, reply: FastifyReply) { + try { + const { id } = request.params + const { 
pullmergeRequestId } = request.body + const { teamId } = request.user + const application: any = await getApplicationFromDB(id, teamId); + if (application?.destinationDockerId) { + const container = `${id}-${pullmergeRequestId}` + const { id: dockerId } = application.destinationDocker; + const found = await checkContainer({ dockerId, container }); + if (found) { + await removeContainer({ id: container, dockerId: application.destinationDocker.id }); + } + } + return reply.code(201).send(); + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} export async function stopApplication(request: FastifyRequest, reply: FastifyReply) { try { const { id } = request.params const { teamId } = request.user const application: any = await getApplicationFromDB(id, teamId); - if (application?.destinationDockerId && application.destinationDocker?.engine) { - const { engine } = application.destinationDocker; - const found = await checkContainer(engine, id); + if (application?.destinationDockerId) { + const { id: dockerId } = application.destinationDocker; + const found = await checkContainer({ dockerId, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: application.destinationDocker.id }); } } return reply.code(201).send(); @@ -304,17 +336,17 @@ export async function deleteApplication(request: FastifyRequest) { + try { + const { id } = request.params + const { domain } = request.query + const { fqdn, settings: { dualCerts } } = await prisma.application.findUnique({ where: { id }, include: { settings: true } }) + return await checkDomainsIsValidInDNS({ hostname: domain, fqdn, dualCerts }); + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} export async function checkDNS(request: FastifyRequest) { try { const { id } = request.params let { exposePort, fqdn, forceSave, dualCerts } = request.body - fqdn = fqdn.toLowerCase(); + if (fqdn) fqdn = fqdn.toLowerCase(); + if 
(exposePort) exposePort = Number(exposePort); + + const { destinationDocker: { id: dockerId, remoteIpAddress, remoteEngine }, exposePort: configuredPort } = await prisma.application.findUnique({ where: { id }, include: { destinationDocker: true } }) const { isDNSCheckEnabled } = await prisma.setting.findFirst({}); - const found = await isDomainConfigured({ id, fqdn }); + + const found = await isDomainConfigured({ id, fqdn, remoteIpAddress }); if (found) { throw { status: 500, message: `Domain ${getDomain(fqdn).replace('www.', '')} is already in use!` } } if (exposePort) { - exposePort = Number(exposePort); - if (exposePort < 1024 || exposePort > 65535) { throw { status: 500, message: `Exposed Port needs to be between 1024 and 65535.` } } - const { default: getPort } = await import('get-port'); - const publicPort = await getPort({ port: exposePort }); - if (publicPort !== exposePort) { - throw { status: 500, message: `Port ${exposePort} is already in use.` } + + if (configuredPort !== exposePort) { + const availablePort = await getFreeExposedPort(id, exposePort, dockerId, remoteIpAddress); + if (availablePort.toString() !== exposePort.toString()) { + throw { status: 500, message: `Port ${exposePort} is already in use.` } + } } } if (isDNSCheckEnabled && !isDev && !forceSave) { - return await checkDomainsIsValidInDNS({ hostname: request.hostname.split(':')[0], fqdn, dualCerts }); + let hostname = request.hostname.split(':')[0]; + if (remoteEngine) hostname = remoteIpAddress; + return await checkDomainsIsValidInDNS({ hostname, fqdn, dualCerts }); } return {} } catch ({ status, message }) { @@ -375,7 +422,7 @@ export async function getUsage(request) { const application: any = await getApplicationFromDB(id, teamId); if (application.destinationDockerId) { - [usage] = await Promise.all([getContainerUsage(application.destinationDocker.engine, id)]); + [usage] = await Promise.all([getContainerUsage(application.destinationDocker.id, id)]); } return { usage @@ -701,21 +748,20 
@@ export async function getPreviews(request: FastifyRequest) { secret.value = decrypt(secret.value); return secret; }); + const applicationSecrets = secrets.filter((secret) => !secret.isPRMRSecret); const PRMRSecrets = secrets.filter((secret) => secret.isPRMRSecret); - const destinationDocker = await prisma.destinationDocker.findFirst({ - where: { application: { some: { id } }, teams: { some: { id: teamId } } } - }); - const docker = dockerInstance({ destinationDocker }); - const listContainers = await docker.engine.listContainers({ - filters: { network: [destinationDocker.network], name: [id] } - }); - const containers = listContainers.filter((container) => { - return ( - container.Labels['coolify.configuration'] && - container.Labels['coolify.type'] === 'standalone-application' - ); - }); + const application = await prisma.application.findUnique({ where: { id }, include: { destinationDocker: true } }); + const { stdout } = await executeDockerCmd({ dockerId: application.destinationDocker.id, command: `docker container ls --filter 'name=${id}-' --format "{{json .}}"` }) + if (stdout === '') { + return { + containers: [], + applicationSecrets: [], + PRMRSecrets: [] + } + } + const containers = formatLabelsOnDocker(stdout).filter(container => container.Labels['coolify.configuration'] && container.Labels['coolify.type'] === 'standalone-application') + const jsonContainers = containers .map((container) => JSON.parse(Buffer.from(container.Labels['coolify.configuration'], 'base64').toString()) @@ -733,50 +779,46 @@ export async function getPreviews(request: FastifyRequest) { }) } } catch ({ status, message }) { + console.log({ status, message }) return errorHandler({ status, message }) } } export async function getApplicationLogs(request: FastifyRequest) { try { - const { id } = request.params + const { id } = request.params; let { since = 0 } = request.query if (since !== 0) { since = day(since).unix(); } - const { destinationDockerId, destinationDocker } = await 
prisma.application.findUnique({ + const { destinationDockerId, destinationDocker: { id: dockerId } } = await prisma.application.findUnique({ where: { id }, include: { destinationDocker: true } }); if (destinationDockerId) { - const docker = dockerInstance({ destinationDocker }); try { - const container = await docker.engine.getContainer(id); - if (container) { - const { default: ansi } = await import('strip-ansi') - const logs = ( - await container.logs({ - stdout: true, - stderr: true, - timestamps: true, - since, - tail: 5000 - }) - ) - .toString() - .split('\n') - .map((l) => ansi(l.slice(8))) - .filter((a) => a); + // const found = await checkContainer({ dockerId, container: id }) + // if (found) { + const { default: ansi } = await import('strip-ansi') + const { stdout, stderr } = await executeDockerCmd({ dockerId, command: `docker logs --since ${since} --tail 5000 --timestamps ${id}` }) + const stripLogsStdout = stdout.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const stripLogsStderr = stderr.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const logs = stripLogsStderr.concat(stripLogsStdout) + const sortedLogs = logs.sort((a, b) => (day(a.split(' ')[0]).isAfter(day(b.split(' ')[0])) ? 1 : -1)) + return { logs: sortedLogs } + // } + } catch (error) { + const { statusCode } = error; + if (statusCode === 404) { return { - logs + logs: [] }; } - } catch (error) { - return { - logs: [] - }; } } + return { + message: 'No logs found.' 
+ } } catch ({ status, message }) { return errorHandler({ status, message }) } diff --git a/apps/api/src/routes/api/v1/applications/index.ts b/apps/api/src/routes/api/v1/applications/index.ts index aa359f973..2f698ddeb 100644 --- a/apps/api/src/routes/api/v1/applications/index.ts +++ b/apps/api/src/routes/api/v1/applications/index.ts @@ -1,8 +1,8 @@ import { FastifyPluginAsync } from 'fastify'; import { OnlyId } from '../../../../types'; -import { cancelDeployment, checkDNS, checkRepository, deleteApplication, deleteSecret, deleteStorage, deployApplication, getApplication, getApplicationLogs, getBuildIdLogs, getBuildLogs, getBuildPack, getGitHubToken, getGitLabSSHKey, getImages, getPreviews, getSecrets, getStorages, getUsage, listApplications, newApplication, saveApplication, saveApplicationSettings, saveApplicationSource, saveBuildPack, saveDeployKey, saveDestination, saveGitLabSSHKey, saveRepository, saveSecret, saveStorage, stopApplication } from './handlers'; +import { cancelDeployment, checkDNS, checkDomain, checkRepository, deleteApplication, deleteSecret, deleteStorage, deployApplication, getApplication, getApplicationLogs, getApplicationStatus, getBuildIdLogs, getBuildLogs, getBuildPack, getGitHubToken, getGitLabSSHKey, getImages, getPreviews, getSecrets, getStorages, getUsage, listApplications, newApplication, saveApplication, saveApplicationSettings, saveApplicationSource, saveBuildPack, saveDeployKey, saveDestination, saveGitLabSSHKey, saveRepository, saveSecret, saveStorage, stopApplication, stopPreviewApplication } from './handlers'; -import type { CancelDeployment, CheckDNS, CheckRepository, DeleteApplication, DeleteSecret, DeleteStorage, DeployApplication, GetApplicationLogs, GetBuildIdLogs, GetBuildLogs, GetImages, SaveApplication, SaveApplicationSettings, SaveApplicationSource, SaveDeployKey, SaveDestination, SaveSecret, SaveStorage } from './types'; +import type { CancelDeployment, CheckDNS, CheckDomain, CheckRepository, DeleteApplication, 
DeleteSecret, DeleteStorage, DeployApplication, GetApplicationLogs, GetBuildIdLogs, GetBuildLogs, GetImages, SaveApplication, SaveApplicationSettings, SaveApplicationSource, SaveDeployKey, SaveDestination, SaveSecret, SaveStorage, StopPreviewApplication } from './types'; const root: FastifyPluginAsync = async (fastify): Promise => { fastify.addHook('onRequest', async (request) => { @@ -17,9 +17,14 @@ const root: FastifyPluginAsync = async (fastify): Promise => { fastify.post('/:id', async (request, reply) => await saveApplication(request, reply)); fastify.delete('/:id', async (request, reply) => await deleteApplication(request, reply)); + fastify.get('/:id/status', async (request) => await getApplicationStatus(request)); + fastify.post('/:id/stop', async (request, reply) => await stopApplication(request, reply)); + fastify.post('/:id/stop/preview', async (request, reply) => await stopPreviewApplication(request, reply)); fastify.post('/:id/settings', async (request, reply) => await saveApplicationSettings(request, reply)); + + fastify.get('/:id/check', async (request) => await checkDomain(request)); fastify.post('/:id/check', async (request) => await checkDNS(request)); fastify.get('/:id/secrets', async (request) => await getSecrets(request)); diff --git a/apps/api/src/routes/api/v1/applications/types.ts b/apps/api/src/routes/api/v1/applications/types.ts index 2d9b9913d..40dabc20a 100644 --- a/apps/api/src/routes/api/v1/applications/types.ts +++ b/apps/api/src/routes/api/v1/applications/types.ts @@ -30,6 +30,9 @@ export interface SaveApplicationSettings extends OnlyId { export interface DeleteApplication extends OnlyId { Querystring: { domain: string; }; } +export interface CheckDomain extends OnlyId { + Querystring: { domain: string; }; +} export interface CheckDNS extends OnlyId { Querystring: { domain: string; }; Body: { @@ -115,3 +118,9 @@ export interface DeployApplication extends OnlyId { branch: string } } + +export interface StopPreviewApplication extends 
OnlyId { + Body: { + pullmergeRequestId: string | null, + } +} \ No newline at end of file diff --git a/apps/api/src/routes/api/v1/base/index.ts b/apps/api/src/routes/api/v1/base/index.ts index 9eaa8c40a..76735a032 100644 --- a/apps/api/src/routes/api/v1/base/index.ts +++ b/apps/api/src/routes/api/v1/base/index.ts @@ -1,10 +1,13 @@ import { FastifyPluginAsync } from 'fastify'; -import { errorHandler, version } from '../../../../lib/common'; +import { errorHandler, listSettings, version } from '../../../../lib/common'; const root: FastifyPluginAsync = async (fastify): Promise => { fastify.get('/', async () => { + const settings = await listSettings() try { return { + ipv4: settings.ipv4, + ipv6: settings.ipv6, version, whiteLabeled: process.env.COOLIFY_WHITE_LABELED === 'true', whiteLabeledIcon: process.env.COOLIFY_WHITE_LABELED_ICON, diff --git a/apps/api/src/routes/api/v1/databases/handlers.ts b/apps/api/src/routes/api/v1/databases/handlers.ts index 8d05bb3a5..ca5fc67c0 100644 --- a/apps/api/src/routes/api/v1/databases/handlers.ts +++ b/apps/api/src/routes/api/v1/databases/handlers.ts @@ -3,24 +3,20 @@ import type { FastifyRequest } from 'fastify'; import { FastifyReply } from 'fastify'; import yaml from 'js-yaml'; import fs from 'fs/promises'; -import { asyncExecShell, ComposeFile, createDirectories, decrypt, encrypt, errorHandler, generateDatabaseConfiguration, generatePassword, getContainerUsage, getDatabaseImage, getDatabaseVersions, getFreePort, listSettings, makeLabelForStandaloneDatabase, prisma, startTcpProxy, startTraefikTCPProxy, stopDatabaseContainer, stopTcpHttpProxy, supportedDatabaseTypesAndVersions, uniqueName, updatePasswordInDb } from '../../../../lib/common'; -import { dockerInstance, getEngine } from '../../../../lib/docker'; +import { ComposeFile, createDirectories, decrypt, encrypt, errorHandler, executeDockerCmd, generateDatabaseConfiguration, generatePassword, getContainerUsage, getDatabaseImage, getDatabaseVersions, getFreePublicPort, 
listSettings, makeLabelForStandaloneDatabase, prisma, startTraefikTCPProxy, stopDatabaseContainer, stopTcpHttpProxy, supportedDatabaseTypesAndVersions, uniqueName, updatePasswordInDb } from '../../../../lib/common'; +import { checkContainer } from '../../../../lib/docker'; import { day } from '../../../../lib/dayjs'; + import { GetDatabaseLogs, OnlyId, SaveDatabase, SaveDatabaseDestination, SaveDatabaseSettings, SaveVersion } from '../../../../types'; import { SaveDatabaseType } from './types'; export async function listDatabases(request: FastifyRequest) { try { const teamId = request.user.teamId; - let databases = [] - if (teamId === '0') { - databases = await prisma.database.findMany({ include: { teams: true } }); - } else { - databases = await prisma.database.findMany({ - where: { teams: { some: { id: teamId } } }, - include: { teams: true } - }); - } + const databases = await prisma.database.findMany({ + where: { teams: { some: { id: teamId === '0' ? undefined : teamId } } }, + include: { teams: true, destinationDocker: true } + }); return { databases } @@ -56,6 +52,36 @@ export async function newDatabase(request: FastifyRequest, reply: FastifyReply) return errorHandler({ status, message }) } } +export async function getDatabaseStatus(request: FastifyRequest) { + try { + const { id } = request.params; + const teamId = request.user.teamId; + let isRunning = false; + + const database = await prisma.database.findFirst({ + where: { id, teams: { some: { id: teamId === '0' ? 
undefined : teamId } } }, + include: { destinationDocker: true, settings: true } + }); + const { destinationDockerId, destinationDocker } = database; + if (destinationDockerId) { + try { + const { stdout } = await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker inspect --format '{{json .State}}' ${id}` }) + + if (JSON.parse(stdout).Running) { + isRunning = true; + } + } catch (error) { + // + } + } + return { + isRunning + } + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} + export async function getDatabase(request: FastifyRequest) { try { const { id } = request.params; @@ -69,29 +95,11 @@ export async function getDatabase(request: FastifyRequest) { } if (database.dbUserPassword) database.dbUserPassword = decrypt(database.dbUserPassword); if (database.rootUserPassword) database.rootUserPassword = decrypt(database.rootUserPassword); - const { destinationDockerId, destinationDocker } = database; - let isRunning = false; - if (destinationDockerId) { - const host = getEngine(destinationDocker.engine); - - try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST=${host} docker inspect --format '{{json .State}}' ${id}` - ); - - if (JSON.parse(stdout).Running) { - isRunning = true; - } - } catch (error) { - // - } - } const configuration = generateDatabaseConfiguration(database); const settings = await listSettings(); return { privatePort: configuration?.privatePort, database, - isRunning, versions: await getDatabaseVersions(database.type), settings }; @@ -164,16 +172,15 @@ export async function saveDatabaseDestination(request: FastifyRequest) { if (database.dbUserPassword) database.dbUserPassword = decrypt(database.dbUserPassword); if (database.rootUserPassword) database.rootUserPassword = decrypt(database.rootUserPassword); if (database.destinationDockerId) { - [usage] = await Promise.all([getContainerUsage(database.destinationDocker.engine, id)]); + [usage] = await 
Promise.all([getContainerUsage(database.destinationDocker.id, id)]); } return { usage @@ -225,7 +232,6 @@ export async function startDatabase(request: FastifyRequest) { generateDatabaseConfiguration(database); const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const volumeName = volume.split(':')[0]; const labels = await makeLabelForStandaloneDatabase({ id, image, volume }); @@ -267,13 +273,13 @@ export async function startDatabase(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); try { - await asyncExecShell(`DOCKER_HOST=${host} docker volume create ${volumeName}`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker volume create ${volumeName}` }) } catch (error) { console.log(error); } try { - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); - if (isPublic) await startTcpProxy(destinationDocker, id, publicPort, privatePort); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up -d` }) + if (isPublic) await startTraefikTCPProxy(destinationDocker, id, publicPort, privatePort); return {}; } catch (error) { throw { @@ -311,39 +317,27 @@ export async function stopDatabase(request: FastifyRequest) { } export async function getDatabaseLogs(request: FastifyRequest) { try { - const teamId = request.user.teamId; const { id } = request.params; let { since = 0 } = request.query if (since !== 0) { since = day(since).unix(); } - const { destinationDockerId, destinationDocker } = await prisma.database.findUnique({ + const { destinationDockerId, destinationDocker: { id: dockerId } } = await prisma.database.findUnique({ where: { id }, include: { destinationDocker: true } }); if (destinationDockerId) { - const docker = dockerInstance({ destinationDocker }); try { - const 
container = await docker.engine.getContainer(id); - if (container) { - const { default: ansi } = await import('strip-ansi') - const logs = ( - await container.logs({ - stdout: true, - stderr: true, - timestamps: true, - since, - tail: 5000 - }) - ) - .toString() - .split('\n') - .map((l) => ansi(l.slice(8))) - .filter((a) => a); - return { - logs - }; - } + // const found = await checkContainer({ dockerId, container: id }) + // if (found) { + const { default: ansi } = await import('strip-ansi') + const { stdout, stderr } = await executeDockerCmd({ dockerId, command: `docker logs --since ${since} --tail 5000 --timestamps ${id}` }) + const stripLogsStdout = stdout.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const stripLogsStderr = stderr.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const logs = stripLogsStderr.concat(stripLogsStdout) + const sortedLogs = logs.sort((a, b) => (day(a.split(' ')[0]).isAfter(day(b.split(' ')[0])) ? 1 : -1)) + return { logs: sortedLogs } + // } } catch (error) { const { statusCode } = error; if (statusCode === 404) { @@ -432,8 +426,10 @@ export async function saveDatabaseSettings(request: FastifyRequest => { fastify.post('/:id', async (request, reply) => await saveDatabase(request, reply)); fastify.delete('/:id', async (request) => await deleteDatabase(request)); + fastify.get('/:id/status', async (request) => await getDatabaseStatus(request)); + fastify.post('/:id/settings', async (request) => await saveDatabaseSettings(request)); fastify.get('/:id/configuration/type', async (request) => await getDatabaseTypes(request)); diff --git a/apps/api/src/routes/api/v1/destinations/handlers.ts b/apps/api/src/routes/api/v1/destinations/handlers.ts index 40d68a84b..3966ecab9 100644 --- a/apps/api/src/routes/api/v1/destinations/handlers.ts +++ b/apps/api/src/routes/api/v1/destinations/handlers.ts @@ -1,14 +1,19 @@ import type { FastifyRequest } from 'fastify'; import { FastifyReply } from 'fastify'; -import { 
asyncExecShell, errorHandler, listSettings, prisma, startCoolifyProxy, startTraefikProxy, stopTraefikProxy } from '../../../../lib/common'; -import { checkContainer, dockerInstance, getEngine } from '../../../../lib/docker'; +import sshConfig from 'ssh-config' +import fs from 'fs/promises' +import os from 'os'; + +import { asyncExecShell, createRemoteEngineConfiguration, decrypt, errorHandler, executeDockerCmd, listSettings, prisma, startTraefikProxy, stopTraefikProxy } from '../../../../lib/common'; +import { checkContainer } from '../../../../lib/docker'; import type { OnlyId } from '../../../../types'; -import type { CheckDestination, NewDestination, Proxy, SaveDestinationSettings } from './types'; +import type { CheckDestination, ListDestinations, NewDestination, Proxy, SaveDestinationSettings } from './types'; -export async function listDestinations(request: FastifyRequest) { +export async function listDestinations(request: FastifyRequest) { try { const teamId = request.user.teamId; + const { onlyVerified = false } = request.query let destinations = [] if (teamId === '0') { destinations = await prisma.destinationDocker.findMany({ include: { teams: true } }); @@ -18,10 +23,14 @@ export async function listDestinations(request: FastifyRequest) { include: { teams: true } }); } + if (onlyVerified) { + destinations = destinations.filter(destination => destination.engine || (destination.remoteEngine && destination.remoteVerified)) + } return { destinations } } catch ({ status, message }) { + console.log({ status, message }) return errorHandler({ status, message }) } } @@ -44,7 +53,8 @@ export async function getDestination(request: FastifyRequest) { const { id } = request.params const teamId = request.user?.teamId; const destination = await prisma.destinationDocker.findFirst({ - where: { id, teams: { some: { id: teamId === '0' ? undefined : teamId } } } + where: { id, teams: { some: { id: teamId === '0' ? 
undefined : teamId } } }, + include: { sshKey: true, application: true, service: true, database: true } }); if (!destination && id !== 'new') { throw { status: 404, message: `Destination not found.` }; @@ -52,23 +62,8 @@ export async function getDestination(request: FastifyRequest) { const settings = await listSettings(); let payload = { destination, - settings, - state: false + settings }; - - if (destination?.remoteEngine) { - // const { stdout } = await asyncExecShell( - // `ssh -p ${destination.port} ${destination.user}@${destination.ipAddress} "docker ps -a"` - // ); - // console.log(stdout) - // const engine = await generateRemoteEngine(destination); - // // await saveSshKey(destination); - // payload.state = await checkContainer(engine, 'coolify-haproxy'); - } else { - const containerName = 'coolify-proxy'; - payload.state = - destination?.engine && (await checkContainer(destination.engine, containerName)); - } return { ...payload }; @@ -79,68 +74,68 @@ export async function getDestination(request: FastifyRequest) { } export async function newDestination(request: FastifyRequest, reply: FastifyReply) { try { - const { id } = request.params - let { name, network, engine, isCoolifyProxyUsed } = request.body const teamId = request.user.teamId; - if (id === 'new') { - const host = getEngine(engine); - const docker = dockerInstance({ destinationDocker: { engine, network } }); - const found = await docker.engine.listNetworks({ filters: { name: [`^${network}$`] } }); - if (found.length === 0) { - await asyncExecShell(`DOCKER_HOST=${host} docker network create --attachable ${network}`); - } - await prisma.destinationDocker.create({ - data: { name, teams: { connect: { id: teamId } }, engine, network, isCoolifyProxyUsed } - }); - const destinations = await prisma.destinationDocker.findMany({ where: { engine } }); - const destination = destinations.find((destination) => destination.network === network); + const { id } = request.params - if (destinations.length > 0) { - 
const proxyConfigured = destinations.find( - (destination) => destination.network !== network && destination.isCoolifyProxyUsed === true - ); - if (proxyConfigured) { - isCoolifyProxyUsed = !!proxyConfigured.isCoolifyProxyUsed; + let { name, network, engine, isCoolifyProxyUsed, remoteIpAddress, remoteUser, remotePort } = request.body + if (id === 'new') { + console.log(engine) + if (engine) { + const { stdout } = await asyncExecShell(`DOCKER_HOST=unix:///var/run/docker.sock docker network ls --filter 'name=^${network}$' --format '{{json .}}'`); + if (stdout === '') { + await asyncExecShell(`DOCKER_HOST=unix:///var/run/docker.sock docker network create --attachable ${network}`); } - await prisma.destinationDocker.updateMany({ where: { engine }, data: { isCoolifyProxyUsed } }); - } - if (isCoolifyProxyUsed) { - const settings = await prisma.setting.findFirst(); - if (settings?.isTraefikUsed) { - await startTraefikProxy(engine); - } else { - await startCoolifyProxy(engine); + await prisma.destinationDocker.create({ + data: { name, teams: { connect: { id: teamId } }, engine, network, isCoolifyProxyUsed } + }); + const destinations = await prisma.destinationDocker.findMany({ where: { engine } }); + const destination = destinations.find((destination) => destination.network === network); + if (destinations.length > 0) { + const proxyConfigured = destinations.find( + (destination) => destination.network !== network && destination.isCoolifyProxyUsed === true + ); + if (proxyConfigured) { + isCoolifyProxyUsed = !!proxyConfigured.isCoolifyProxyUsed; + } + await prisma.destinationDocker.updateMany({ where: { engine }, data: { isCoolifyProxyUsed } }); } + if (isCoolifyProxyUsed) { + await startTraefikProxy(destination.id); + } + return reply.code(201).send({ id: destination.id }); + } else { + const destination = await prisma.destinationDocker.create({ + data: { name, teams: { connect: { id: teamId } }, engine, network, isCoolifyProxyUsed, remoteEngine: true, remoteIpAddress, 
remoteUser, remotePort } + }); + return reply.code(201).send({ id: destination.id }) } - return reply.code(201).send({ id: destination.id }); } else { await prisma.destinationDocker.update({ where: { id }, data: { name, engine, network } }); return reply.code(201).send(); } } catch ({ status, message }) { + console.log({ status, message }) return errorHandler({ status, message }) } } export async function deleteDestination(request: FastifyRequest) { try { const { id } = request.params - const destination = await prisma.destinationDocker.delete({ where: { id } }); - if (destination.isCoolifyProxyUsed) { - const host = getEngine(destination.engine); - const { network } = destination; - const settings = await prisma.setting.findFirst(); - const containerName = settings.isTraefikUsed ? 'coolify-proxy' : 'coolify-haproxy'; - const { stdout: found } = await asyncExecShell( - `DOCKER_HOST=${host} docker ps -a --filter network=${network} --filter name=${containerName} --format '{{.}}'` - ); - if (found) { - await asyncExecShell( - `DOCKER_HOST="${host}" docker network disconnect ${network} ${containerName}` - ); - await asyncExecShell(`DOCKER_HOST="${host}" docker network rm ${network}`); + const { network, remoteVerified, engine, isCoolifyProxyUsed } = await prisma.destinationDocker.findUnique({ where: { id } }); + if (isCoolifyProxyUsed) { + if (engine || remoteVerified) { + const { stdout: found } = await executeDockerCmd({ + dockerId: id, + command: `docker ps -a --filter network=${network} --filter name=coolify-proxy --format '{{.}}'` + }) + if (found) { + await executeDockerCmd({ dockerId: id, command: `docker network disconnect ${network} coolify-proxy` }) + await executeDockerCmd({ dockerId: id, command: `docker network rm ${network}` }) + } } } + await prisma.destinationDocker.delete({ where: { id } }); return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -163,34 +158,83 @@ export async function saveDestinationSettings(request: 
FastifyRequest) { - const { engine } = request.body; + const { id } = request.params try { - await startTraefikProxy(engine); + await startTraefikProxy(id); return {} } catch ({ status, message }) { - await stopTraefikProxy(engine); + console.log({ status, message }) + await stopTraefikProxy(id); return errorHandler({ status, message }) } } export async function stopProxy(request: FastifyRequest) { - const { engine } = request.body; + const { id } = request.params try { - await stopTraefikProxy(engine); + await stopTraefikProxy(id); return {} } catch ({ status, message }) { return errorHandler({ status, message }) } } export async function restartProxy(request: FastifyRequest) { - const { engine } = request.body; + const { id } = request.params try { - await stopTraefikProxy(engine); - await startTraefikProxy(engine); - await prisma.destinationDocker.updateMany({ - where: { engine }, + await stopTraefikProxy(id); + await startTraefikProxy(id); + await prisma.destinationDocker.update({ + where: { id }, data: { isCoolifyProxyUsed: true } }); return {} + } catch ({ status, message }) { + await prisma.destinationDocker.update({ + where: { id }, + data: { isCoolifyProxyUsed: false } + }); + return errorHandler({ status, message }) + } +} + +export async function assignSSHKey(request: FastifyRequest) { + try { + const { id: sshKeyId } = request.body; + const { id } = request.params; + await prisma.destinationDocker.update({ where: { id }, data: { sshKey: { connect: { id: sshKeyId } } } }) + return {} + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} +export async function verifyRemoteDockerEngine(request: FastifyRequest, reply: FastifyReply) { + try { + const { id } = request.params; + await createRemoteEngineConfiguration(id); + + const { remoteIpAddress, remoteUser, network } = await prisma.destinationDocker.findFirst({ where: { id } }) + const host = `ssh://${remoteUser}@${remoteIpAddress}` + const { stdout } = await 
asyncExecShell(`DOCKER_HOST=${host} docker network ls --filter 'name=${network}' --no-trunc --format "{{json .}}"`); + + if (!stdout) { + await asyncExecShell(`DOCKER_HOST=${host} docker network create --attachable ${network}`); + } + + await prisma.destinationDocker.update({ where: { id }, data: { remoteVerified: true } }) + return reply.code(201).send() + + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} + +export async function getDestinationStatus(request: FastifyRequest) { + try { + const { id } = request.params + const destination = await prisma.destinationDocker.findUnique({ where: { id } }) + const isRunning = await checkContainer({ dockerId: destination.id, container: 'coolify-proxy' }) + return { + isRunning + } } catch ({ status, message }) { return errorHandler({ status, message }) } diff --git a/apps/api/src/routes/api/v1/destinations/index.ts b/apps/api/src/routes/api/v1/destinations/index.ts index 43440cc1c..007242695 100644 --- a/apps/api/src/routes/api/v1/destinations/index.ts +++ b/apps/api/src/routes/api/v1/destinations/index.ts @@ -1,24 +1,29 @@ import { FastifyPluginAsync } from 'fastify'; -import { checkDestination, deleteDestination, getDestination, listDestinations, newDestination, restartProxy, saveDestinationSettings, startProxy, stopProxy } from './handlers'; +import { assignSSHKey, checkDestination, deleteDestination, getDestination, getDestinationStatus, listDestinations, newDestination, restartProxy, saveDestinationSettings, startProxy, stopProxy, verifyRemoteDockerEngine } from './handlers'; import type { OnlyId } from '../../../../types'; -import type { CheckDestination, NewDestination, Proxy, SaveDestinationSettings } from './types'; +import type { CheckDestination, ListDestinations, NewDestination, Proxy, SaveDestinationSettings } from './types'; const root: FastifyPluginAsync = async (fastify): Promise => { fastify.addHook('onRequest', async (request) => { return await request.jwtVerify() }) - 
fastify.get('/', async (request) => await listDestinations(request)); + fastify.get('/', async (request) => await listDestinations(request)); fastify.post('/check', async (request) => await checkDestination(request)); fastify.get('/:id', async (request) => await getDestination(request)); fastify.post('/:id', async (request, reply) => await newDestination(request, reply)); fastify.delete('/:id', async (request) => await deleteDestination(request)); + fastify.get('/:id/status', async (request) => await getDestinationStatus(request)); - fastify.post('/:id/settings', async (request, reply) => await saveDestinationSettings(request)); - fastify.post('/:id/start', async (request, reply) => await startProxy(request)); - fastify.post('/:id/stop', async (request, reply) => await stopProxy(request)); - fastify.post('/:id/restart', async (request, reply) => await restartProxy(request)); + fastify.post('/:id/settings', async (request) => await saveDestinationSettings(request)); + fastify.post('/:id/start', async (request,) => await startProxy(request)); + fastify.post('/:id/stop', async (request) => await stopProxy(request)); + fastify.post('/:id/restart', async (request) => await restartProxy(request)); + + fastify.post('/:id/configuration/sshKey', async (request) => await assignSSHKey(request)); + + fastify.post('/:id/verify', async (request, reply) => await verifyRemoteDockerEngine(request, reply)); }; export default root; diff --git a/apps/api/src/routes/api/v1/destinations/types.ts b/apps/api/src/routes/api/v1/destinations/types.ts index 25691b9d8..fe2742218 100644 --- a/apps/api/src/routes/api/v1/destinations/types.ts +++ b/apps/api/src/routes/api/v1/destinations/types.ts @@ -1,5 +1,10 @@ import { OnlyId } from "../../../../types" +export interface ListDestinations { + Querystring: { + onlyVerified: string + } +} export interface CheckDestination { Body: { network: string @@ -20,7 +25,5 @@ export interface SaveDestinationSettings extends OnlyId { } } export interface 
Proxy extends OnlyId { - Body: { - engine: string - } + } \ No newline at end of file diff --git a/apps/api/src/routes/api/v1/handlers.ts b/apps/api/src/routes/api/v1/handlers.ts index 4b62f3845..b55f8e8b4 100644 --- a/apps/api/src/routes/api/v1/handlers.ts +++ b/apps/api/src/routes/api/v1/handlers.ts @@ -17,7 +17,8 @@ export async function hashPassword(password: string): Promise { export async function cleanupManually() { try { - await cleanupDockerStorage('unix:///var/run/docker.sock', true, true) + const destination = await prisma.destinationDocker.findFirst({ where: { engine: '/var/run/docker.sock' } }) + await cleanupDockerStorage(destination.id, true, true) return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -30,11 +31,14 @@ export async function checkUpdate(request: FastifyRequest) { const { data: versions } = await axios.get( `https://get.coollabs.io/versions.json?appId=${process.env['COOLIFY_APP_ID']}&version=${currentVersion}` ); - const latestVersion = - isStaging - ? versions['coolify'].next.version - : versions['coolify'].main.version; + const latestVersion = versions['coolify'].main.version const isUpdateAvailable = compare(latestVersion, currentVersion); + if (isStaging) { + return { + isUpdateAvailable: true, + latestVersion: 'next' + } + } return { isUpdateAvailable: isStaging ? 
true : isUpdateAvailable === 1, latestVersion @@ -154,7 +158,6 @@ export async function login(request: FastifyRequest, reply: FastifyReply) } if (userFound) { if (userFound.type === 'email') { - // TODO: Review this one if (userFound.password === 'RESETME') { const hashedPassword = await hashPassword(password); if (userFound.updatedAt < new Date(Date.now() - 1000 * 60 * 10)) { diff --git a/apps/api/src/routes/api/v1/iam/handlers.ts b/apps/api/src/routes/api/v1/iam/handlers.ts index 7009ec794..72d648793 100644 --- a/apps/api/src/routes/api/v1/iam/handlers.ts +++ b/apps/api/src/routes/api/v1/iam/handlers.ts @@ -273,11 +273,15 @@ export async function inviteToTeam(request: FastifyRequest, reply: const { email, permission, teamId, teamName } = request.body; const userFound = await prisma.user.findUnique({ where: { email } }); if (!userFound) { - throw `No user found with '${email}' email address.` + throw { + message: `No user found with '${email}' email address.` + }; } const uid = userFound.id; - if (uid === userId) { - throw `Invitation to yourself? Whaaaaat?` + if (uid === userId) { + throw { + message: `Invitation to yourself? 
Whaaaaat?` + }; } const alreadyInTeam = await prisma.team.findFirst({ where: { id: teamId, users: { some: { id: uid } } } diff --git a/apps/api/src/routes/api/v1/services/handlers.ts b/apps/api/src/routes/api/v1/services/handlers.ts index bf9c2f71f..8d07ac70b 100644 --- a/apps/api/src/routes/api/v1/services/handlers.ts +++ b/apps/api/src/routes/api/v1/services/handlers.ts @@ -2,13 +2,13 @@ import type { FastifyReply, FastifyRequest } from 'fastify'; import fs from 'fs/promises'; import yaml from 'js-yaml'; import bcrypt from 'bcryptjs'; -import { prisma, uniqueName, asyncExecShell, getServiceImage, getServiceImages, configureServiceType, getServiceFromDB, getContainerUsage, removeService, isDomainConfigured, saveUpdateableFields, fixType, decrypt, encrypt, getServiceMainPort, createDirectories, ComposeFile, makeLabelForServices, getFreePort, getDomain, errorHandler, generatePassword, isDev, stopTcpHttpProxy, supportedServiceTypesAndVersions } from '../../../../lib/common'; +import { prisma, uniqueName, asyncExecShell, getServiceImage, configureServiceType, getServiceFromDB, getContainerUsage, removeService, isDomainConfigured, saveUpdateableFields, fixType, decrypt, encrypt, getServiceMainPort, createDirectories, ComposeFile, makeLabelForServices, getFreePublicPort, getDomain, errorHandler, generatePassword, isDev, stopTcpHttpProxy, supportedServiceTypesAndVersions, executeDockerCmd, listSettings, getFreeExposedPort, checkDomainsIsValidInDNS, persistentVolumes } from '../../../../lib/common'; import { day } from '../../../../lib/dayjs'; -import { checkContainer, dockerInstance, getEngine, removeContainer } from '../../../../lib/docker'; +import { checkContainer, isContainerExited, removeContainer } from '../../../../lib/docker'; import cuid from 'cuid'; import type { OnlyId } from '../../../../types'; -import type { ActivateWordpressFtp, CheckService, DeleteServiceSecret, DeleteServiceStorage, GetServiceLogs, SaveService, SaveServiceDestination, SaveServiceSecret, 
SaveServiceSettings, SaveServiceStorage, SaveServiceType, SaveServiceVersion, ServiceStartStop, SetWordpressSettings } from './types'; +import type { ActivateWordpressFtp, CheckService, CheckServiceDomain, DeleteServiceSecret, DeleteServiceStorage, GetServiceLogs, SaveService, SaveServiceDestination, SaveServiceSecret, SaveServiceSettings, SaveServiceStorage, SaveServiceType, SaveServiceVersion, ServiceStartStop, SetWordpressSettings } from './types'; // async function startServiceNew(request: FastifyRequest) { // try { @@ -145,15 +145,10 @@ import type { ActivateWordpressFtp, CheckService, DeleteServiceSecret, DeleteSer export async function listServices(request: FastifyRequest) { try { const teamId = request.user.teamId; - let services = [] - if (teamId === '0') { - services = await prisma.service.findMany({ include: { teams: true } }); - } else { - services = await prisma.service.findMany({ - where: { teams: { some: { id: teamId } } }, - include: { teams: true } - }); - } + const services = await prisma.service.findMany({ + where: { teams: { some: { id: teamId === '0' ? 
undefined : teamId } } }, + include: { teams: true, destinationDocker: true } + }); return { services } @@ -172,43 +167,41 @@ export async function newService(request: FastifyRequest, reply: FastifyReply) { return errorHandler({ status, message }) } } +export async function getServiceStatus(request: FastifyRequest) { + try { + const teamId = request.user.teamId; + const { id } = request.params; + + let isRunning = false; + let isExited = false + + const service = await getServiceFromDB({ id, teamId }); + const { destinationDockerId, settings } = service; + + if (destinationDockerId) { + isRunning = await checkContainer({ dockerId: service.destinationDocker.id, container: id }); + isExited = await isContainerExited(service.destinationDocker.id, id); + } + return { + isRunning, + isExited, + settings + } + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} + export async function getService(request: FastifyRequest) { try { const teamId = request.user.teamId; const { id } = request.params; const service = await getServiceFromDB({ id, teamId }); - + const settings = await listSettings() if (!service) { throw { status: 404, message: 'Service not found.' 
} } - - const { destinationDockerId, destinationDocker, type, version, settings } = service; - let isRunning = false; - if (destinationDockerId) { - const host = getEngine(destinationDocker.engine); - const docker = dockerInstance({ destinationDocker }); - const baseImage = getServiceImage(type); - const images = getServiceImages(type); - docker.engine.pull(`${baseImage}:${version}`); - if (images?.length > 0) { - for (const image of images) { - docker.engine.pull(`${image}:latest`); - } - } - try { - const { stdout } = await asyncExecShell( - `DOCKER_HOST=${host} docker inspect --format '{{json .State}}' ${id}` - ); - - if (JSON.parse(stdout).Running) { - isRunning = true; - } - } catch (error) { - // - } - } return { - isRunning, service, settings } @@ -282,7 +275,7 @@ export async function getServiceUsage(request: FastifyRequest) { const service = await getServiceFromDB({ id, teamId }); if (service.destinationDockerId) { - [usage] = await Promise.all([getContainerUsage(service.destinationDocker.engine, id)]); + [usage] = await Promise.all([getContainerUsage(service.destinationDocker.id, id)]); } return { usage @@ -299,33 +292,22 @@ export async function getServiceLogs(request: FastifyRequest) { if (since !== 0) { since = day(since).unix(); } - const { destinationDockerId, destinationDocker } = await prisma.service.findUnique({ + const { destinationDockerId, destinationDocker: { id: dockerId } } = await prisma.service.findUnique({ where: { id }, include: { destinationDocker: true } }); if (destinationDockerId) { - const docker = dockerInstance({ destinationDocker }); try { - const container = await docker.engine.getContainer(id); - if (container) { - const { default: ansi } = await import('strip-ansi') - const logs = ( - await container.logs({ - stdout: true, - stderr: true, - timestamps: true, - since, - tail: 5000 - }) - ) - .toString() - .split('\n') - .map((l) => ansi(l.slice(8))) - .filter((a) => a); - return { - logs - }; - } + // const found = await 
checkContainer({ dockerId, container: id }) + // if (found) { + const { default: ansi } = await import('strip-ansi') + const { stdout, stderr } = await executeDockerCmd({ dockerId, command: `docker logs --since ${since} --tail 5000 --timestamps ${id}` }) + const stripLogsStdout = stdout.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const stripLogsStderr = stderr.toString().split('\n').map((l) => ansi(l)).filter((a) => a); + const logs = stripLogsStderr.concat(stripLogsStdout) + const sortedLogs = logs.sort((a, b) => (day(a.split(' ')[0]).isAfter(day(b.split(' ')[0])) ? 1 : -1)) + return { logs: sortedLogs } + // } } catch (error) { const { statusCode } = error; if (statusCode === 404) { @@ -364,40 +346,57 @@ export async function saveServiceSettings(request: FastifyRequest) { + try { + const { id } = request.params + const { domain } = request.query + const { fqdn, dualCerts } = await prisma.service.findUnique({ where: { id } }) + return await checkDomainsIsValidInDNS({ hostname: domain, fqdn, dualCerts }); + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} export async function checkService(request: FastifyRequest) { try { const { id } = request.params; - let { fqdn, exposePort, otherFqdns } = request.body; + let { fqdn, exposePort, forceSave, otherFqdns, dualCerts } = request.body; if (fqdn) fqdn = fqdn.toLowerCase(); if (otherFqdns && otherFqdns.length > 0) otherFqdns = otherFqdns.map((f) => f.toLowerCase()); if (exposePort) exposePort = Number(exposePort); - let found = await isDomainConfigured({ id, fqdn }); + const { destinationDocker: { id: dockerId, remoteIpAddress, remoteEngine }, exposePort: configuredPort } = await prisma.service.findUnique({ where: { id }, include: { destinationDocker: true } }) + const { isDNSCheckEnabled } = await prisma.setting.findFirst({}); + + let found = await isDomainConfigured({ id, fqdn, remoteIpAddress }); if (found) { throw { status: 500, message: `Domain 
${getDomain(fqdn).replace('www.', '')} is already in use!` } } if (otherFqdns && otherFqdns.length > 0) { for (const ofqdn of otherFqdns) { - found = await isDomainConfigured({ id, fqdn: ofqdn, checkOwn: true }); + found = await isDomainConfigured({ id, fqdn: ofqdn, remoteIpAddress }); if (found) { throw { status: 500, message: `Domain ${getDomain(ofqdn).replace('www.', '')} is already in use!` } } } } if (exposePort) { - const { default: getPort } = await import('get-port'); - exposePort = Number(exposePort); - if (exposePort < 1024 || exposePort > 65535) { throw { status: 500, message: `Exposed Port needs to be between 1024 and 65535.` } } - const publicPort = await getPort({ port: exposePort }); - if (publicPort !== exposePort) { - throw { status: 500, message: `Port ${exposePort} is already in use.` } + if (configuredPort !== exposePort) { + const availablePort = await getFreeExposedPort(id, exposePort, dockerId, remoteIpAddress); + if (availablePort.toString() !== exposePort.toString()) { + throw { status: 500, message: `Port ${exposePort} is already in use.` } + } } } + if (isDNSCheckEnabled && !isDev && !forceSave) { + let hostname = request.hostname.split(':')[0]; + if (remoteEngine) hostname = remoteIpAddress; + return await checkDomainsIsValidInDNS({ hostname, fqdn, dualCerts }); + } return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -686,6 +685,7 @@ async function startPlausibleAnalyticsService(request: FastifyRequest + + `; const clickhouseUserConfigXml = ` @@ -787,12 +788,16 @@ COPY ./init.query /docker-entrypoint-initdb.d/init.query COPY ./init-db.sh /docker-entrypoint-initdb.d/init-db.sh`; await fs.writeFile(`${workdir}/Dockerfile`, Dockerfile); + + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.plausibleAnalytics) + const composeFile: ComposeFile = { version: '3.8', services: { [id]: { container_name: id, image: config.plausibleAnalytics.image, + volumes, command: 'sh -c "sleep 10 
&& /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh db init-admin && /entrypoint.sh run"', networks: [network], @@ -849,6 +854,7 @@ COPY ./init-db.sh /docker-entrypoint-initdb.d/init-db.sh`; } }, volumes: { + ...volumeMounts, [config.postgresql.volume.split(':')[0]]: { name: config.postgresql.volume.split(':')[0] }, @@ -859,10 +865,8 @@ COPY ./init-db.sh /docker-entrypoint-initdb.d/init-db.sh`; }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell( - `DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up --build -d` - ); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -877,17 +881,17 @@ async function stopPlausibleAnalyticsService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort } = + const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('nocodb'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -921,6 +924,7 @@ async function startNocodbService(request: FastifyRequest) { config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, 
config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -928,7 +932,7 @@ async function startNocodbService(request: FastifyRequest) { container_name: id, image: config.image, networks: [network], - volumes: [config.volume], + volumes, environment: config.environmentVariables, restart: 'always', ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), @@ -948,16 +952,12 @@ async function startNocodbService(request: FastifyRequest) { external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -970,10 +970,9 @@ async function stopNocodbService(request: FastifyRequest) { const service = await getServiceFromDB({ id, teamId }); const { destinationDockerId, destinationDocker, fqdn } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } return {} @@ -993,16 +992,17 @@ async function startMinioService(request: FastifyRequest) { fqdn, destinationDockerId, destinationDocker, + persistentStorage, exposePort, minio: { rootUser, 
rootUserPassword }, serviceSecret } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('minio'); - const publicPort = await getFreePort(); + const { service: { destinationDocker: { id: dockerId } } } = await prisma.minio.findUnique({ where: { serviceId: id }, include: { service: { include: { destinationDocker: true } } } }) + const publicPort = await getFreePublicPort(id, dockerId); const consolePort = 9001; const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -1022,6 +1022,7 @@ async function startMinioService(request: FastifyRequest) { config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1031,7 +1032,7 @@ async function startMinioService(request: FastifyRequest) { command: `server /data --console-address ":${consolePort}"`, environment: config.environmentVariables, networks: [network], - volumes: [config.volume], + volumes, restart: 'always', ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), labels: makeLabelForServices('minio'), @@ -1050,16 +1051,12 @@ async function startMinioService(request: FastifyRequest) { external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) await prisma.minio.update({ where: { serviceId: id }, data: { publicPort } }); return {} } catch ({ status, message }) { @@ -1071,13 +1068,12 @@ async function stopMinioService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; await prisma.minio.update({ where: { serviceId: id }, data: { publicPort: null } }) if (destinationDockerId) { - const engine = destinationDocker.engine; - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } return {} @@ -1103,7 +1099,6 @@ async function startVscodeService(request: FastifyRequest) { } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = 
getServiceMainPort('vscodeserver'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -1121,28 +1116,8 @@ async function startVscodeService(request: FastifyRequest) { config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) - const volumes = - persistentStorage?.map((storage) => { - return `${id}${storage.path.replace(/\//gi, '-')}:${storage.path}`; - }) || []; - - const composeVolumes = volumes.map((volume) => { - return { - [`${volume.split(':')[0]}`]: { - name: volume.split(':')[0] - } - }; - }); - const volumeMounts = Object.assign( - {}, - { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - }, - ...composeVolumes - ); const composeFile: ComposeFile = { version: '3.8', services: { @@ -1151,7 +1126,7 @@ async function startVscodeService(request: FastifyRequest) { image: config.image, environment: config.environmentVariables, networks: [network], - volumes: [config.volume, ...volumes], + volumes, restart: 'always', ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), labels: makeLabelForServices('vscodeServer'), @@ -1175,16 +1150,16 @@ async function startVscodeService(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) const changePermissionOn = persistentStorage.map((p) => p.path); if (changePermissionOn.length > 0) { - await asyncExecShell( - `DOCKER_HOST=${host} docker exec -u root ${id} chown -R 1000:1000 ${changePermissionOn.join( + await executeDockerCmd({ + dockerId: destinationDocker.id, command: `docker exec -u root ${id} chown -R 1000:1000 ${changePermissionOn.join( ' ' )}` - ); + }) } return {} } catch ({ status, message }) { @@ -1196,12 +1171,11 @@ async function stopVscodeService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } return {} @@ -1221,6 +1195,7 @@ async function startWordpressService(request: FastifyRequest) destinationDockerId, serviceSecret, destinationDocker, + 
persistentStorage, exposePort, wordpress: { mysqlDatabase, @@ -1236,7 +1211,6 @@ async function startWordpressService(request: FastifyRequest) } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const image = getServiceImage(type); const port = getServiceMainPort('wordpress'); @@ -1270,6 +1244,9 @@ async function startWordpressService(request: FastifyRequest) config.wordpress.environmentVariables[secret.name] = secret.value; }); } + + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.wordpress) + let composeFile: ComposeFile = { version: '3.8', services: { @@ -1277,7 +1254,7 @@ async function startWordpressService(request: FastifyRequest) container_name: id, image: config.wordpress.image, environment: config.wordpress.environmentVariables, - volumes: [config.wordpress.volume], + volumes, networks: [network], restart: 'always', ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), @@ -1297,11 +1274,7 @@ async function startWordpressService(request: FastifyRequest) external: true } }, - volumes: { - [config.wordpress.volume.split(':')[0]]: { - name: config.wordpress.volume.split(':')[0] - } - } + volumes: volumeMounts }; if (!ownMysql) { composeFile.services[id].depends_on = [`${id}-mysql`]; @@ -1328,8 +1301,10 @@ async function startWordpressService(request: FastifyRequest) } const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + 
return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1346,28 +1321,27 @@ async function stopWordpressService(request: FastifyRequest) { wordpress: { ftpEnabled } } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { - const found = await checkContainer(engine, `${id}-mysql`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-mysql` }); if (found) { - await removeContainer({ id: `${id}-mysql`, engine }); + await removeContainer({ id: `${id}-mysql`, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { if (ftpEnabled) { - const found = await checkContainer(engine, `${id}-ftp`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-ftp` }); if (found) { - await removeContainer({ id: `${id}-ftp`, engine }); + await removeContainer({ id: `${id}-ftp`, dockerId: destinationDocker.id }); } await prisma.wordpress.update({ where: { serviceId: id }, @@ -1389,11 +1363,10 @@ async function startVaultwardenService(request: FastifyRequest const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort } = + const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('vaultwarden'); const { workdir } = await createDirectories({ repository: type, buildId: 
id }); @@ -1409,6 +1382,7 @@ async function startVaultwardenService(request: FastifyRequest config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1417,7 +1391,7 @@ async function startVaultwardenService(request: FastifyRequest image: config.image, environment: config.environmentVariables, networks: [network], - volumes: [config.volume], + volumes, restart: 'always', ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), labels: makeLabelForServices('vaultWarden'), @@ -1436,16 +1410,14 @@ async function startVaultwardenService(request: FastifyRequest external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1456,14 +1428,12 @@ async function stopVaultwardenService(request: FastifyRequest) const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + 
const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -1480,10 +1450,9 @@ async function startLanguageToolService(request: FastifyRequest const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -1572,10 +1537,9 @@ async function startN8nService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort } = + const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('n8n'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -1593,6 +1557,7 @@ async function startN8nService(request: FastifyRequest) { config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1600,7 +1565,7 @@ async function startN8nService(request: FastifyRequest) { 
container_name: id, image: config.image, networks: [network], - volumes: [config.volume], + volumes, environment: config.environmentVariables, restart: 'always', labels: makeLabelForServices('n8n'), @@ -1620,16 +1585,14 @@ async function startN8nService(request: FastifyRequest) { external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1640,14 +1603,12 @@ async function stopN8nService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -1664,10 +1625,9 @@ async function startUptimekumaService(request: FastifyRequest) const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, 
teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort } = + const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('uptimekuma'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -1683,6 +1643,7 @@ async function startUptimekumaService(request: FastifyRequest) config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1690,7 +1651,7 @@ async function startUptimekumaService(request: FastifyRequest) container_name: id, image: config.image, networks: [network], - volumes: [config.volume], + volumes, environment: config.environmentVariables, restart: 'always', ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), @@ -1710,17 +1671,14 @@ async function startUptimekumaService(request: FastifyRequest) external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1731,14 +1689,12 @@ async function stopUptimekumaService(request: FastifyRequest) const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -1761,6 +1717,7 @@ async function startGhostService(request: FastifyRequest) { destinationDockerId, destinationDocker, serviceSecret, + persistentStorage, exposePort, fqdn, ghost: { @@ -1774,7 +1731,6 @@ async function startGhostService(request: FastifyRequest) { } } = service; const network = destinationDockerId && destinationDocker.network; 
- const host = getEngine(destinationDocker.engine); const { workdir } = await createDirectories({ repository: type, buildId: id }); const image = getServiceImage(type); @@ -1815,6 +1771,8 @@ async function startGhostService(request: FastifyRequest) { config.ghost.environmentVariables[secret.name] = secret.value; }); } + + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.ghost) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1822,7 +1780,7 @@ async function startGhostService(request: FastifyRequest) { container_name: id, image: config.ghost.image, networks: [network], - volumes: [config.ghost.volume], + volumes, environment: config.ghost.environmentVariables, restart: 'always', ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), @@ -1860,9 +1818,7 @@ async function startGhostService(request: FastifyRequest) { } }, volumes: { - [config.ghost.volume.split(':')[0]]: { - name: config.ghost.volume.split(':')[0] - }, + ...volumeMounts, [config.mariadb.volume.split(':')[0]]: { name: config.mariadb.volume.split(':')[0] } @@ -1871,8 +1827,9 @@ async function startGhostService(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1883,18 +1840,16 @@ async function stopGhostService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service 
= await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - let found = await checkContainer(engine, id); + let found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } - found = await checkContainer(engine, `${id}-mariadb`); + found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-mariadb` }); if (found) { - await removeContainer({ id: `${id}-mariadb`, engine }); + await removeContainer({ id: `${id}-mariadb`, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -1914,10 +1869,9 @@ async function startMeilisearchService(request: FastifyRequest const { meiliSearch: { masterKey } } = service; - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort } = + const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('meilisearch'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -1936,6 +1890,7 @@ async function startMeilisearchService(request: FastifyRequest config.environmentVariables[secret.name] = secret.value; }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config) const composeFile: ComposeFile = { version: '3.8', services: { @@ -1946,7 +1901,7 @@ async function startMeilisearchService(request: FastifyRequest environment: config.environmentVariables, restart: 'always', ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - volumes: [config.volume], + volumes, labels: makeLabelForServices('meilisearch'), deploy: { restart_policy: { @@ -1963,17 +1918,12 @@ async function startMeilisearchService(request: FastifyRequest external: true } }, - volumes: { - [config.volume.split(':')[0]]: { - name: config.volume.split(':')[0] - } - } + volumes: volumeMounts }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -1984,14 +1934,12 @@ async function stopMeilisearchService(request: FastifyRequest) const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -2014,6 +1962,7 @@ async function startUmamiService(request: FastifyRequest) { destinationDockerId, destinationDocker, serviceSecret, + persistentStorage, exposePort, umami: { umamiAdminPassword, @@ -2024,7 +1973,6 @@ async function 
startUmamiService(request: FastifyRequest) { } } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('umami'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -2138,6 +2086,7 @@ async function startUmamiService(request: FastifyRequest) { FROM ${config.postgresql.image} COPY ./schema.postgresql.sql /docker-entrypoint-initdb.d/schema.postgresql.sql`; await fs.writeFile(`${workdir}/Dockerfile`, Dockerfile); + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.umami) const composeFile: ComposeFile = { version: '3.8', services: { @@ -2146,7 +2095,7 @@ async function startUmamiService(request: FastifyRequest) { image: config.umami.image, environment: config.umami.environmentVariables, networks: [network], - volumes: [], + volumes, restart: 'always', ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), labels: makeLabelForServices('umami'), @@ -2183,6 +2132,7 @@ async function startUmamiService(request: FastifyRequest) { } }, volumes: { + ...volumeMounts, [config.postgresql.volume.split(':')[0]]: { name: config.postgresql.volume.split(':')[0] } @@ -2190,9 +2140,8 @@ async function startUmamiService(request: FastifyRequest) { }; const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -2203,22 
+2152,20 @@ async function stopUmamiService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { - const found = await checkContainer(engine, `${id}-postgresql`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-postgresql` }); if (found) { - await removeContainer({ id: `${id}-postgresql`, engine }); + await removeContainer({ id: `${id}-postgresql`, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -2240,12 +2187,12 @@ async function startHasuraService(request: FastifyRequest) { version, destinationDockerId, destinationDocker, + persistentStorage, serviceSecret, exposePort, hasura: { postgresqlUser, postgresqlPassword, postgresqlDatabase } } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('hasura'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -2274,6 +2221,7 @@ async function startHasuraService(request: FastifyRequest) { }); } + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.hasura) const composeFile: ComposeFile = { version: '3.8', services: { @@ -2282,7 +2230,7 @@ async function startHasuraService(request: FastifyRequest) { image: config.hasura.image, environment: config.hasura.environmentVariables, networks: 
[network], - volumes: [], + volumes, restart: 'always', labels: makeLabelForServices('hasura'), ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), @@ -2319,6 +2267,7 @@ async function startHasuraService(request: FastifyRequest) { } }, volumes: { + ...volumeMounts, [config.postgresql.volume.split(':')[0]]: { name: config.postgresql.volume.split(':')[0] } @@ -2327,8 +2276,9 @@ async function startHasuraService(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) + return {} } catch ({ status, message }) { return errorHandler({ status, message }) @@ -2339,22 +2289,20 @@ async function stopHasuraService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { - const found = await checkContainer(engine, `${id}-postgresql`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: 
`${id}-postgresql` }); if (found) { - await removeContainer({ id: `${id}-postgresql`, engine }); + await removeContainer({ id: `${id}-postgresql`, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -2378,6 +2326,7 @@ async function startFiderService(request: FastifyRequest) { destinationDockerId, destinationDocker, serviceSecret, + persistentStorage, exposePort, fider: { postgresqlUser, @@ -2396,7 +2345,6 @@ async function startFiderService(request: FastifyRequest) { } } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('fider'); const { workdir } = await createDirectories({ repository: type, buildId: id }); @@ -2434,7 +2382,7 @@ async function startFiderService(request: FastifyRequest) { config.fider.environmentVariables[secret.name] = secret.value; }); } - + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.fider) const composeFile: ComposeFile = { version: '3.8', services: { @@ -2443,7 +2391,7 @@ async function startFiderService(request: FastifyRequest) { image: config.fider.image, environment: config.fider.environmentVariables, networks: [network], - volumes: [], + volumes, restart: 'always', labels: makeLabelForServices('fider'), ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), @@ -2480,6 +2428,7 @@ async function startFiderService(request: FastifyRequest) { } }, volumes: { + ...volumeMounts, [config.postgresql.volume.split(':')[0]]: { name: config.postgresql.volume.split(':')[0] } @@ -2488,8 +2437,8 @@ async function startFiderService(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { @@ -2501,22 +2450,20 @@ async function stopFiderService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { - const found = await checkContainer(engine, `${id}-postgresql`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-postgresql` }); if (found) { - await removeContainer({ id: `${id}-postgresql`, engine }); + await removeContainer({ id: `${id}-postgresql`, dockerId: 
destinationDocker.id }); } } catch (error) { console.error(error); @@ -2540,6 +2487,7 @@ async function startMoodleService(request: FastifyRequest) { destinationDockerId, destinationDocker, serviceSecret, + persistentStorage, exposePort, moodle: { defaultUsername, @@ -2553,12 +2501,10 @@ async function startMoodleService(request: FastifyRequest) { } } = service; const network = destinationDockerId && destinationDocker.network; - const host = getEngine(destinationDocker.engine); const port = getServiceMainPort('moodle'); const { workdir } = await createDirectories({ repository: type, buildId: id }); const image = getServiceImage(type); - const domain = getDomain(fqdn); const config = { moodle: { image: `${image}:${version}`, @@ -2591,7 +2537,7 @@ async function startMoodleService(request: FastifyRequest) { config.moodle.environmentVariables[secret.name] = secret.value; }); } - + const { volumes, volumeMounts } = persistentVolumes(id, persistentStorage, config.moodle) const composeFile: ComposeFile = { version: '3.8', services: { @@ -2600,7 +2546,7 @@ async function startMoodleService(request: FastifyRequest) { image: config.moodle.image, environment: config.moodle.environmentVariables, networks: [network], - volumes: [], + volumes, restart: 'always', labels: makeLabelForServices('moodle'), ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), @@ -2639,9 +2585,7 @@ async function startMoodleService(request: FastifyRequest) { } }, volumes: { - [config.moodle.volume.split(':')[0]]: { - name: config.moodle.volume.split(':')[0] - }, + ...volumeMounts, [config.mariadb.volume.split(':')[0]]: { name: config.mariadb.volume.split(':')[0] } @@ -2651,8 +2595,8 @@ async function startMoodleService(request: FastifyRequest) { const composeFileDestination = `${workdir}/docker-compose.yaml`; await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} pull`); - await asyncExecShell(`DOCKER_HOST=${host} docker compose -f ${composeFileDestination} up -d`); + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) + await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) return {} } catch ({ status, message }) { @@ -2664,22 +2608,20 @@ async function stopMoodleService(request: FastifyRequest) { const { id } = request.params; const teamId = request.user.teamId; const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, fqdn } = service; + const { destinationDockerId, destinationDocker } = service; if (destinationDockerId) { - const engine = destinationDocker.engine; - try { - const found = await checkContainer(engine, id); + const found = await checkContainer({ dockerId: destinationDocker.id, container: id }); if (found) { - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); } try { - const found = await checkContainer(engine, `${id}-mariadb`); + const found = await checkContainer({ dockerId: destinationDocker.id, container: `${id}-mariadb` }); if (found) { - await removeContainer({ id: `${id}-mariadb`, engine }); 
+ await removeContainer({ id: `${id}-mariadb`, dockerId: destinationDocker.id }); } } catch (error) { console.error(error); @@ -2702,14 +2644,10 @@ export async function activatePlausibleUsers(request: FastifyRequest, re plausibleAnalytics: { postgresqlUser, postgresqlPassword, postgresqlDatabase } } = await getServiceFromDB({ id, teamId }); if (destinationDockerId) { - const docker = dockerInstance({ destinationDocker }); - const container = await docker.engine.getContainer(id); - const command = await container.exec({ - Cmd: [ - `psql -H postgresql://${postgresqlUser}:${postgresqlPassword}@localhost:5432/${postgresqlDatabase} -c "UPDATE users SET email_verified = true;"` - ] - }); - await command.start(); + await executeDockerCmd({ + dockerId: destinationDocker.id, + command: `docker exec ${id} 'psql -H postgresql://${postgresqlUser}:${postgresqlPassword}@localhost:5432/${postgresqlDatabase} -c "UPDATE users SET email_verified = true;"'` + }) return await reply.code(201).send() } throw { status: 500, message: 'Could not activate users.' } @@ -2717,11 +2655,34 @@ export async function activatePlausibleUsers(request: FastifyRequest, re return errorHandler({ status, message }) } } +export async function cleanupPlausibleLogs(request: FastifyRequest, reply: FastifyReply) { + try { + const { id } = request.params + const teamId = request.user.teamId; + const { + destinationDockerId, + destinationDocker, + } = await getServiceFromDB({ id, teamId }); + if (destinationDockerId) { + await executeDockerCmd({ + dockerId: destinationDocker.id, + command: `docker exec ${id}-clickhouse sh -c "/usr/bin/clickhouse-client -q \\"SELECT name FROM system.tables WHERE name LIKE '%log%';\\"| xargs -I{} /usr/bin/clickhouse-client -q \"TRUNCATE TABLE system.{};\""` + }) + return await reply.code(201).send() + } + throw { status: 500, message: 'Could cleanup logs.' 
} + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} export async function activateWordpressFtp(request: FastifyRequest, reply: FastifyReply) { const { id } = request.params const { ftpEnabled } = request.body; - const publicPort = await getFreePort(); + const { service: { destinationDocker: { id: dockerId } } } = await prisma.wordpress.findUnique({ where: { serviceId: id }, include: { service: { include: { destinationDocker: true } } } }) + + const publicPort = await getFreePublicPort(id, dockerId); + let ftpUser = cuid(); let ftpPassword = generatePassword(); @@ -2741,7 +2702,6 @@ export async function activateWordpressFtp(request: FastifyRequest => { fastify.addHook('onRequest', async (request) => { @@ -41,6 +44,9 @@ const root: FastifyPluginAsync = async (fastify): Promise => { fastify.post('/:id', async (request, reply) => await saveService(request, reply)); fastify.delete('/:id', async (request) => await deleteService(request)); + fastify.get('/:id/status', async (request) => await getServiceStatus(request)); + + fastify.get('/:id/check', async (request) => await checkServiceDomain(request)); fastify.post('/:id/check', async (request) => await checkService(request)); fastify.post('/:id/settings', async (request, reply) => await saveServiceSettings(request, reply)); @@ -69,6 +75,7 @@ const root: FastifyPluginAsync = async (fastify): Promise => { fastify.post('/:id/:type/settings', async (request, reply) => await setSettingsService(request, reply)); fastify.post('/:id/plausibleanalytics/activate', async (request, reply) => await activatePlausibleUsers(request, reply)); + fastify.post('/:id/plausibleanalytics/cleanup', async (request, reply) => await cleanupPlausibleLogs(request, reply)); fastify.post('/:id/wordpress/ftp', async (request, reply) => await activateWordpressFtp(request, reply)); }; diff --git a/apps/api/src/routes/api/v1/services/types.ts b/apps/api/src/routes/api/v1/services/types.ts index 4ed631998..f09b4423f 
100644 --- a/apps/api/src/routes/api/v1/services/types.ts +++ b/apps/api/src/routes/api/v1/services/types.ts @@ -25,9 +25,16 @@ export interface SaveServiceSettings extends OnlyId { dualCerts: boolean } } +export interface CheckServiceDomain extends OnlyId { + Querystring: { + domain: string + } +} export interface CheckService extends OnlyId { Body: { fqdn: string, + forceSave: boolean, + dualCerts: boolean, exposePort: number, otherFqdns: Array } diff --git a/apps/api/src/routes/api/v1/settings/handlers.ts b/apps/api/src/routes/api/v1/settings/handlers.ts index 0c0c727d1..073dbd7e3 100644 --- a/apps/api/src/routes/api/v1/settings/handlers.ts +++ b/apps/api/src/routes/api/v1/settings/handlers.ts @@ -1,15 +1,24 @@ import { promises as dns } from 'dns'; import type { FastifyReply, FastifyRequest } from 'fastify'; -import { checkDomainsIsValidInDNS, errorHandler, getDomain, isDNSValid, isDomainConfigured, listSettings, prisma } from '../../../../lib/common'; -import { CheckDNS, CheckDomain, DeleteDomain, SaveSettings } from './types'; +import { checkDomainsIsValidInDNS, decrypt, encrypt, errorHandler, getDomain, isDNSValid, isDomainConfigured, listSettings, prisma } from '../../../../lib/common'; +import { CheckDNS, CheckDomain, DeleteDomain, DeleteSSHKey, SaveSettings, SaveSSHKey } from './types'; export async function listAllSettings(request: FastifyRequest) { try { + const teamId = request.user.teamId; const settings = await listSettings(); + const sshKeys = await prisma.sshKey.findMany({ where: { team: { id: teamId } } }) + const unencryptedKeys = [] + if (sshKeys.length > 0) { + for (const key of sshKeys) { + unencryptedKeys.push({ id: key.id, name: key.name, privateKey: decrypt(key.privateKey), createdAt: key.createdAt }) + } + } return { - settings + settings, + sshKeys: unencryptedKeys } } catch ({ status, message }) { return errorHandler({ status, message }) @@ -68,7 +77,8 @@ export async function checkDomain(request: FastifyRequest) { throw "Domain already 
configured"; } if (isDNSCheckEnabled && !forceSave) { - return await checkDomainsIsValidInDNS({ hostname: request.hostname.split(':')[0], fqdn, dualCerts }); + const hostname = request.hostname.split(':')[0] + return await checkDomainsIsValidInDNS({ hostname, fqdn, dualCerts }); } return {}; } catch ({ status, message }) { @@ -83,4 +93,31 @@ export async function checkDNS(request: FastifyRequest) { } catch ({ status, message }) { return errorHandler({ status, message }) } +} + +export async function saveSSHKey(request: FastifyRequest, reply: FastifyReply) { + try { + const teamId = request.user.teamId; + const { privateKey, name } = request.body; + const found = await prisma.sshKey.findMany({ where: { name } }) + if (found.length > 0) { + throw { + message: "Name already used. Choose another one please." + } + } + const encryptedSSHKey = encrypt(privateKey) + await prisma.sshKey.create({ data: { name, privateKey: encryptedSSHKey, team: { connect: { id: teamId } } } }) + return reply.code(201).send() + } catch ({ status, message }) { + return errorHandler({ status, message }) + } +} +export async function deleteSSHKey(request: FastifyRequest, reply: FastifyReply) { + try { + const { id } = request.body; + await prisma.sshKey.delete({ where: { id } }) + return reply.code(201).send() + } catch ({ status, message }) { + return errorHandler({ status, message }) + } } \ No newline at end of file diff --git a/apps/api/src/routes/api/v1/settings/index.ts b/apps/api/src/routes/api/v1/settings/index.ts index f5181a14e..96da5948b 100644 --- a/apps/api/src/routes/api/v1/settings/index.ts +++ b/apps/api/src/routes/api/v1/settings/index.ts @@ -1,6 +1,6 @@ import { FastifyPluginAsync } from 'fastify'; -import { checkDNS, checkDomain, deleteDomain, listAllSettings, saveSettings } from './handlers'; -import { CheckDNS, CheckDomain, DeleteDomain, SaveSettings } from './types'; +import { checkDNS, checkDomain, deleteDomain, deleteSSHKey, listAllSettings, saveSettings, saveSSHKey } 
from './handlers'; +import { CheckDNS, CheckDomain, DeleteDomain, DeleteSSHKey, SaveSettings, SaveSSHKey } from './types'; const root: FastifyPluginAsync = async (fastify): Promise => { @@ -13,6 +13,9 @@ const root: FastifyPluginAsync = async (fastify): Promise => { fastify.get('/check', async (request) => await checkDNS(request)); fastify.post('/check', async (request) => await checkDomain(request)); + + fastify.post('/sshKey', async (request, reply) => await saveSSHKey(request, reply)); + fastify.delete('/sshKey', async (request, reply) => await deleteSSHKey(request, reply)); }; export default root; diff --git a/apps/api/src/routes/api/v1/settings/types.ts b/apps/api/src/routes/api/v1/settings/types.ts index aa7398804..a33b614a4 100644 --- a/apps/api/src/routes/api/v1/settings/types.ts +++ b/apps/api/src/routes/api/v1/settings/types.ts @@ -28,4 +28,15 @@ export interface CheckDNS { Params: { domain: string, } +} +export interface SaveSSHKey { + Body: { + privateKey: string, + name: string + } +} +export interface DeleteSSHKey { + Body: { + id: string + } } \ No newline at end of file diff --git a/apps/api/src/routes/webhooks/github/handlers.ts b/apps/api/src/routes/webhooks/github/handlers.ts index cb37029f4..968305a97 100644 --- a/apps/api/src/routes/webhooks/github/handlers.ts +++ b/apps/api/src/routes/webhooks/github/handlers.ts @@ -75,18 +75,18 @@ export async function gitHubEvents(request: FastifyRequest): Promi if (!allowedGithubEvents.includes(githubEvent)) { throw { status: 500, message: 'Event not allowed.' } } - let repository, projectId, branch; + let projectId, branch; const body = request.body if (githubEvent === 'push') { - repository = body.repository; - projectId = repository.id; - branch = body.ref.split('/')[2]; + projectId = body.repository.id; + branch = body.ref.includes('/') ? 
body.ref.split('/')[2] : body.ref; } else if (githubEvent === 'pull_request') { - repository = body.pull_request.head.repo; - projectId = repository.id; - branch = body.pull_request.head.ref.split('/')[2]; + projectId = body.pull_request.base.repo.id; + branch = body.pull_request.base.ref.includes('/') ? body.pull_request.base.ref.split('/')[2] : body.pull_request.base.ref; + } + if (!projectId || !branch) { + throw { status: 500, message: 'Cannot parse projectId or branch from the webhook?!' } } - const applicationFound = await getApplicationFromDBWebhook(projectId, branch); if (applicationFound) { const webhookSecret = applicationFound.gitSource.githubApp.webhookSecret || null; @@ -154,7 +154,7 @@ export async function gitHubEvents(request: FastifyRequest): Promi } else if (githubEvent === 'pull_request') { const pullmergeRequestId = body.number; const pullmergeRequestAction = body.action; - const sourceBranch = body.pull_request.head.ref; + const sourceBranch = body.pull_request.head.ref.includes('/') ? body.pull_request.head.ref.split('/')[2] : body.pull_request.head.ref; if (!allowedActions.includes(pullmergeRequestAction)) { throw { status: 500, message: 'Action not allowed.' } } @@ -162,8 +162,10 @@ export async function gitHubEvents(request: FastifyRequest): Promi if (applicationFound.settings.previews) { if (applicationFound.destinationDockerId) { const isRunning = await checkContainer( - applicationFound.destinationDocker.engine, - applicationFound.id + { + dockerId: applicationFound.destinationDocker.id, + container: applicationFound.id + } ); if (!isRunning) { throw { status: 500, message: 'Application not running.' 
} @@ -204,8 +206,7 @@ export async function gitHubEvents(request: FastifyRequest): Promi } else if (pullmergeRequestAction === 'closed') { if (applicationFound.destinationDockerId) { const id = `${applicationFound.id}-${pullmergeRequestId}`; - const engine = applicationFound.destinationDocker.engine; - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: applicationFound.destinationDocker.id }); } return { message: 'Removed preview. Thank you!' diff --git a/apps/api/src/routes/webhooks/github/types.ts b/apps/api/src/routes/webhooks/github/types.ts index c7502ec6d..1c6f6f9f4 100644 --- a/apps/api/src/routes/webhooks/github/types.ts +++ b/apps/api/src/routes/webhooks/github/types.ts @@ -8,12 +8,22 @@ export interface GitHubEvents { Body: { number: string, action: string, - repository: string, + repository: { + id: string, + }, ref: string, pull_request: { + base: { + ref: string, + repo: { + id: string, + } + }, head: { ref: string, - repo: string + repo: { + id: string, + } } } } diff --git a/apps/api/src/routes/webhooks/gitlab/handlers.ts b/apps/api/src/routes/webhooks/gitlab/handlers.ts index dfe310ed7..0e7f8ec5d 100644 --- a/apps/api/src/routes/webhooks/gitlab/handlers.ts +++ b/apps/api/src/routes/webhooks/gitlab/handlers.ts @@ -34,7 +34,6 @@ export async function configureGitLabApp(request: FastifyRequest) { if (applicationFound.settings.previews) { if (applicationFound.destinationDockerId) { const isRunning = await checkContainer( - applicationFound.destinationDocker.engine, - applicationFound.id + { + dockerId: applicationFound.destinationDocker.id, + container: applicationFound.id + } ); if (!isRunning) { throw { status: 500, message: 'Application not running.' 
} @@ -164,7 +165,7 @@ export async function gitLabEvents(request: FastifyRequest) { if (applicationFound.destinationDockerId) { const id = `${applicationFound.id}-${pullmergeRequestId}`; const engine = applicationFound.destinationDocker.engine; - await removeContainer({ id, engine }); + await removeContainer({ id, dockerId: applicationFound.destinationDocker.id }); } return { message: 'Removed preview. Thank you!' diff --git a/apps/api/src/routes/webhooks/traefik/handlers.ts b/apps/api/src/routes/webhooks/traefik/handlers.ts index 879ccd960..de805ee71 100644 --- a/apps/api/src/routes/webhooks/traefik/handlers.ts +++ b/apps/api/src/routes/webhooks/traefik/handlers.ts @@ -1,6 +1,5 @@ import { FastifyRequest } from "fastify"; -import { asyncExecShell, errorHandler, getDomain, isDev, listServicesWithIncludes, prisma, supportedServiceTypesAndVersions } from "../../../lib/common"; -import { getEngine } from "../../../lib/docker"; +import { errorHandler, getDomain, isDev, prisma, supportedServiceTypesAndVersions, include, executeDockerCmd } from "../../../lib/common"; import { TraefikOtherConfiguration } from "./types"; function configureMiddleware( @@ -167,6 +166,7 @@ export async function traefikConfiguration(request, reply) { } }; const applications = await prisma.application.findMany({ + where: { destinationDocker: { remoteEngine: false } }, include: { destinationDocker: true, settings: true } }); const data = { @@ -184,7 +184,7 @@ export async function traefikConfiguration(request, reply) { settings: { previews, dualCerts } } = application; if (destinationDockerId) { - const { engine, network } = destinationDocker; + const { network, id: dockerId } = destinationDocker; const isRunning = true; if (fqdn) { const domain = getDomain(fqdn); @@ -205,10 +205,7 @@ export async function traefikConfiguration(request, reply) { }); } if (previews) { - const host = getEngine(engine); - const { stdout } = await asyncExecShell( - `DOCKER_HOST=${host} docker container ls 
--filter="status=running" --filter="network=${network}" --filter="name=${id}-" --format="{{json .Names}}"` - ); + const { stdout } = await executeDockerCmd({ dockerId, command: `docker container ls --filter="status=running" --filter="network=${network}" --filter="name=${id}-" --format="{{json .Names}}"` }) const containers = stdout .trim() .split('\n') @@ -235,7 +232,11 @@ export async function traefikConfiguration(request, reply) { } } } - const services = await listServicesWithIncludes(); + const services: any = await prisma.service.findMany({ + where: { destinationDocker: { remoteEngine: false } }, + include, + orderBy: { createdAt: 'desc' }, + }); for (const service of services) { const { @@ -248,7 +249,6 @@ export async function traefikConfiguration(request, reply) { plausibleAnalytics } = service; if (destinationDockerId) { - const { engine } = destinationDocker; const found = supportedServiceTypesAndVersions.find((a) => a.name === type); if (found) { const port = found.ports.main; @@ -487,4 +487,219 @@ export async function traefikOtherConfiguration(request: FastifyRequest a) + .map((c) => c.replace(/"/g, '')); + if (containers.length > 0) { + for (const container of containers) { + const previewDomain = `${container.split('-')[1]}.${domain}`; + const nakedDomain = previewDomain.replace(/^www\./, ''); + data.applications.push({ + id: container, + container, + port: port || 3000, + domain: previewDomain, + isRunning, + nakedDomain, + isHttps, + isWWW, + isDualCerts: dualCerts + }); + } + } + } + } + } + } + const services: any = await prisma.service.findMany({ + where: { destinationDocker: { id } }, + include, + orderBy: { createdAt: 'desc' } + }); + + for (const service of services) { + const { + fqdn, + id, + type, + destinationDocker, + destinationDockerId, + dualCerts, + plausibleAnalytics + } = service; + if (destinationDockerId) { + const found = supportedServiceTypesAndVersions.find((a) => a.name === type); + if (found) { + const port = 
found.ports.main; + const publicPort = service[type]?.publicPort; + const isRunning = true; + if (fqdn) { + const domain = getDomain(fqdn); + const nakedDomain = domain.replace(/^www\./, ''); + const isHttps = fqdn.startsWith('https://'); + const isWWW = fqdn.includes('www.'); + if (isRunning) { + // Plausible Analytics custom script + let scriptName = false; + if (type === 'plausibleanalytics' && plausibleAnalytics.scriptName !== 'plausible.js') { + scriptName = plausibleAnalytics.scriptName; + } + + let container = id; + let otherDomain = null; + let otherNakedDomain = null; + let otherIsHttps = null; + let otherIsWWW = null; + + if (type === 'minio' && service.minio.apiFqdn) { + otherDomain = getDomain(service.minio.apiFqdn); + otherNakedDomain = otherDomain.replace(/^www\./, ''); + otherIsHttps = service.minio.apiFqdn.startsWith('https://'); + otherIsWWW = service.minio.apiFqdn.includes('www.'); + } + data.services.push({ + id, + container, + type, + otherDomain, + otherNakedDomain, + otherIsHttps, + otherIsWWW, + port, + publicPort, + domain, + nakedDomain, + isRunning, + isHttps, + isWWW, + isDualCerts: dualCerts, + scriptName + }); + } + } + } + } + } + + + for (const application of data.applications) { + configureMiddleware(application, traefik); + } + for (const service of data.services) { + const { id, scriptName } = service; + + configureMiddleware(service, traefik); + if (service.type === 'minio') { + service.id = id + '-minio'; + service.container = id; + service.domain = service.otherDomain; + service.nakedDomain = service.otherNakedDomain; + service.isHttps = service.otherIsHttps; + service.isWWW = service.otherIsWWW; + service.port = 9000; + configureMiddleware(service, traefik); + } + + if (scriptName) { + traefik.http.middlewares[`${id}-redir`] = { + replacepathregex: { + regex: `/js/${scriptName}`, + replacement: '/js/plausible.js' + } + }; + } + } + for (const coolify of data.coolify) { + configureMiddleware(coolify, traefik); + } + if 
(Object.keys(traefik.http.routers).length === 0) { + traefik.http.routers = null; + } + if (Object.keys(traefik.http.services).length === 0) { + traefik.http.services = null; + } + return { + ...traefik + } + } catch ({ status, message }) { + return errorHandler({ status, message }) + } } \ No newline at end of file diff --git a/apps/api/src/routes/webhooks/traefik/index.ts b/apps/api/src/routes/webhooks/traefik/index.ts index 1d69be739..f9c7ff4b8 100644 --- a/apps/api/src/routes/webhooks/traefik/index.ts +++ b/apps/api/src/routes/webhooks/traefik/index.ts @@ -1,10 +1,12 @@ import { FastifyPluginAsync } from 'fastify'; -import { traefikConfiguration, traefikOtherConfiguration } from './handlers'; +import { remoteTraefikConfiguration, traefikConfiguration, traefikOtherConfiguration } from './handlers'; import { TraefikOtherConfiguration } from './types'; const root: FastifyPluginAsync = async (fastify): Promise => { fastify.get('/main.json', async (request, reply) => traefikConfiguration(request, reply)); fastify.get('/other.json', async (request, reply) => traefikOtherConfiguration(request)); + + fastify.get('/remote/:id', async (request) => remoteTraefikConfiguration(request)); }; export default root; diff --git a/apps/api/src/types.ts b/apps/api/src/types.ts index 71f3db158..6367fd566 100644 --- a/apps/api/src/types.ts +++ b/apps/api/src/types.ts @@ -36,3 +36,4 @@ export interface SaveDatabaseSettings extends OnlyId { } + diff --git a/apps/ui/package.json b/apps/ui/package.json index babb6755c..0a64124f0 100644 --- a/apps/ui/package.json +++ b/apps/ui/package.json @@ -1,13 +1,12 @@ { - "name": "coolify-ui", + "name": "ui", "description": "Coolify's SvelteKit UI", - "license": "AGPL-3.0", + "license": "Apache-2.0", "scripts": { "dev": "vite dev", "build": "vite build", "package": "svelte-kit package", "preview": "svelte-kit preview", - "prepare": "svelte-kit sync", "test": "playwright test", "check": "svelte-check --tsconfig ./tsconfig.json", "check:watch": 
"svelte-check --tsconfig ./tsconfig.json --watch", @@ -15,32 +14,32 @@ "format": "prettier --write --plugin-search-dir=. ." }, "devDependencies": { - "@playwright/test": "1.23.3", - "@sveltejs/kit": "1.0.0-next.375", + "@playwright/test": "1.24.2", + "@sveltejs/kit": "1.0.0-next.405", "@types/js-cookie": "3.0.2", - "@typescript-eslint/eslint-plugin": "5.30.6", - "@typescript-eslint/parser": "5.30.6", - "autoprefixer": "10.4.7", - "eslint": "8.19.0", + "@typescript-eslint/eslint-plugin": "5.33.0", + "@typescript-eslint/parser": "5.33.0", + "autoprefixer": "10.4.8", + "eslint": "8.21.0", "eslint-config-prettier": "8.5.0", "eslint-plugin-svelte3": "4.0.0", - "postcss": "8.4.14", + "postcss": "8.4.16", "prettier": "2.7.1", "prettier-plugin-svelte": "2.7.0", "svelte": "3.49.0", "svelte-check": "2.8.0", "svelte-preprocess": "4.10.7", - "tailwindcss": "3.1.6", + "tailwindcss": "3.1.8", "tailwindcss-scrollbar": "0.1.0", "tslib": "2.4.0", "typescript": "4.7.4", - "vite": "^3.0.0" + "vite": "3.0.5" }, "type": "module", "dependencies": { - "@sveltejs/adapter-static": "1.0.0-next.36", - "@zerodevx/svelte-toast": "0.7.2", + "@sveltejs/adapter-static": "1.0.0-next.39", "cuid": "2.1.8", + "daisyui": "2.22.0", "js-cookie": "3.0.1", "p-limit": "4.0.0", "svelte-select": "4.4.7", diff --git a/apps/ui/src/app.d.ts b/apps/ui/src/app.d.ts index ed5f4e197..ebc7c2668 100644 --- a/apps/ui/src/app.d.ts +++ b/apps/ui/src/app.d.ts @@ -21,4 +21,5 @@ declare namespace App { } declare const GITPOD_WORKSPACE_URL: string + \ No newline at end of file diff --git a/apps/ui/src/app.html b/apps/ui/src/app.html index af82b3042..16d104bc7 100644 --- a/apps/ui/src/app.html +++ b/apps/ui/src/app.html @@ -1,5 +1,5 @@ - + diff --git a/apps/ui/src/lib/api.ts b/apps/ui/src/lib/api.ts index fc11e09d3..b494f6d8c 100644 --- a/apps/ui/src/lib/api.ts +++ b/apps/ui/src/lib/api.ts @@ -7,10 +7,12 @@ export function getAPIUrl() { const newURL = href.replace('https://', 'https://3001-').replace(/\/$/, '') return newURL 
} + if (CODESANDBOX_HOST) { + return `https://${CODESANDBOX_HOST.replace(/\$PORT/,'3001')}` + } return dev ? 'http://localhost:3001' : 'http://localhost:3000'; } export function getWebhookUrl(type: string) { - console.log(GITPOD_WORKSPACE_URL) if (GITPOD_WORKSPACE_URL) { const { href } = new URL(GITPOD_WORKSPACE_URL) const newURL = href.replace('https://', 'https://3001-').replace(/\/$/, '') @@ -21,6 +23,15 @@ export function getWebhookUrl(type: string) { return `${newURL}/webhooks/gitlab/events` } } + if (CODESANDBOX_HOST) { + const newURL = CODESANDBOX_HOST.replace(/\$PORT/,'3001') + if (type === 'github') { + return `${newURL}/webhooks/github/events` + } + if (type === 'gitlab') { + return `${newURL}/webhooks/gitlab/events` + } + } return `https://webhook.site/0e5beb2c-4e9b-40e2-a89e-32295e570c21/events`; } async function send({ diff --git a/apps/ui/src/lib/common.ts b/apps/ui/src/lib/common.ts index e0396f19c..f45cf6a52 100644 --- a/apps/ui/src/lib/common.ts +++ b/apps/ui/src/lib/common.ts @@ -1,4 +1,4 @@ -import { toast } from '@zerodevx/svelte-toast'; +import { addToast } from '$lib/store'; export const supportedServiceTypesAndVersions = [ { @@ -167,12 +167,20 @@ export const asyncSleep = (delay: number) => export function errorNotification(error: any): void { if (error.message) { if (error.message === 'Cannot read properties of undefined (reading \'postMessage\')') { - toast.push('Currently there is background process in progress. Please try again later.'); - return; + return addToast({ + message: 'Currently there is background process in progress. 
Please try again later.', + type: 'error', + }); } - toast.push(error.message); + addToast({ + message: error.message, + type: 'error', + }); } else { - toast.push('Ooops, something is not okay, are you okay?'); + addToast({ + message: 'Ooops, something is not okay, are you okay?', + type: 'error', + }); } console.error(JSON.stringify(error)); } diff --git a/apps/ui/src/lib/components/CopyPasswordField.svelte b/apps/ui/src/lib/components/CopyPasswordField.svelte index 103065601..0a6fba471 100644 --- a/apps/ui/src/lib/components/CopyPasswordField.svelte +++ b/apps/ui/src/lib/components/CopyPasswordField.svelte @@ -1,6 +1,6 @@ diff --git a/apps/ui/src/lib/components/Setting.svelte b/apps/ui/src/lib/components/Setting.svelte index d1cf55dc0..30f45755b 100644 --- a/apps/ui/src/lib/components/Setting.svelte +++ b/apps/ui/src/lib/components/Setting.svelte @@ -17,13 +17,14 @@
+ export let type = 'info'; + + +
+ + +
diff --git a/apps/ui/src/lib/components/Toasts.svelte b/apps/ui/src/lib/components/Toasts.svelte new file mode 100644 index 000000000..2fdec7190 --- /dev/null +++ b/apps/ui/src/lib/components/Toasts.svelte @@ -0,0 +1,22 @@ + + +{#if $toasts} +
+ +
+{/if} + + diff --git a/apps/ui/src/lib/components/UpdateAvailable.svelte b/apps/ui/src/lib/components/UpdateAvailable.svelte index 4171a1c5f..ba27d001b 100644 --- a/apps/ui/src/lib/components/UpdateAvailable.svelte +++ b/apps/ui/src/lib/components/UpdateAvailable.svelte @@ -1,8 +1,7 @@ @@ -132,7 +144,7 @@
{usage?.disk.usedGb}GB
-
diff --git a/apps/ui/src/lib/locales/en.json b/apps/ui/src/lib/locales/en.json index cab9ecc80..809d38851 100644 --- a/apps/ui/src/lib/locales/en.json +++ b/apps/ui/src/lib/locales/en.json @@ -88,7 +88,7 @@ "removing": "Removing...", "remove_domain": "Remove domain", "public_port_range": "Public Port Range", - "public_port_range_explainer": "Ports used to expose databases/services/internal services.
Add them to your firewall (if applicable).

You can specify a range of ports, eg: 9000-9100", + "public_port_range_explainer": "Ports used to expose databases/services/internal services.
Add them to your firewall (if applicable).

You can specify a range of ports, eg: 9000-9100", "no_actions_available": "No actions available", "admin_api_key": "Admin API key" }, @@ -144,8 +144,8 @@ }, "preview": { "need_during_buildtime": "Need during buildtime?", - "setup_secret_app_first": "You can add secrets to PR/MR deployments. Please add secrets to the application first.
Useful for creating staging environments.", - "values_overwriting_app_secrets": "These values overwrite application secrets in PR/MR deployments. Useful for creating staging environments.", + "setup_secret_app_first": "You can add secrets to PR/MR deployments. Please add secrets to the application first.
Useful for creating staging environments.", + "values_overwriting_app_secrets": "These values overwrite application secrets in PR/MR deployments. Useful for creating staging environments.", "redeploy": "Redeploy", "no_previews_available": "No previews available" }, @@ -167,15 +167,15 @@ "permission_denied_stop_application": "You do not have permission to stop the application.", "rebuild_application": "Rebuild application", "permission_denied_rebuild_application": "You do not have permission to rebuild application.", - "build_and_start_application": "Build and start application", - "permission_denied_build_and_start_application": "You do not have permission to Build and start application.", + "build_and_start_application": "Deploy", + "permission_denied_build_and_start_application": "You do not have permission to deploy application.", "configurations": "Configurations", "secret": "Secrets", "persistent_storage": "Persistent Storage", "previews": "Previews", "logs": "Application Logs", "build_logs": "Build Logs", - "delete_application": "Delete application", + "delete_application": "Delete", "permission_denied_delete_application": "You do not have permission to delete this application", "domain_already_in_use": "Domain {{domain}} is already used.", "dns_not_set_error": "DNS not set correctly or propogated for {{domain}}.

Please check your DNS settings.", @@ -194,14 +194,14 @@ "application": "Application", "url_fqdn": "URL (FQDN)", "domain_fqdn": "Domain (FQDN)", - "https_explainer": "If you specify https, the application will be accessible only over https. SSL certificate will be generated for you.
If you specify www, the application will be redirected (302) from non-www and vice versa.

To modify the domain, you must first stop the application.

You must set your DNS to point to the server IP in advance.", + "https_explainer": "If you specify https, the application will be accessible only over https. SSL certificate will be generated for you.
If you specify www, the application will be redirected (302) from non-www and vice versa.

To modify the domain, you must first stop the application.

You must set your DNS to point to the server IP in advance.", "ssl_www_and_non_www": "Generate SSL for www and non-www?", - "ssl_explainer": "It will generate certificates for both www and non-www.
You need to have both DNS entries set in advance.

Useful if you expect to have visitors on both.", + "ssl_explainer": "It will generate certificates for both www and non-www.
You need to have both DNS entries set in advance.

Useful if you expect to have visitors on both.", "install_command": "Install Command", "build_command": "Build Command", "start_command": "Start Command", - "directory_to_use_explainer": "Directory to use as the base for all commands.
Could be useful with monorepos.", - "publish_directory_explainer": "Directory containing all the assets for deployment.
For example: dist,_site or public.", + "directory_to_use_explainer": "Directory to use as the base for all commands.
Could be useful with monorepos.", + "publish_directory_explainer": "Directory containing all the assets for deployment.
For example: dist,_site or public.", "features": "Features", "enable_automatic_deployment": "Enable Automatic Deployment", "enable_auto_deploy_webhooks": "Enable automatic deployment through webhooks.", @@ -227,17 +227,17 @@ "select_database_type": "Select a Database type", "select_database_version": "Select a Database version", "confirm_stop": "Are you sure you would like to stop {{name}}?", - "stop_database": "Stop database", + "stop_database": "Stop", "permission_denied_stop_database": "You do not have permission to stop the database.", - "start_database": "Start database", + "start_database": "Start", "permission_denied_start_database": "You do not have permission to start the database.", - "delete_database": "Delete Database", + "delete_database": "Delete", "permission_denied_delete_database": "You do not have permission to delete a Database", "no_databases_found": "No databases found", - "logs": "Database Logs" + "logs": "Logs" }, "destination": { - "delete_destination": "Delete Destination", + "delete_destination": "Delete", "permission_denied_delete_destination": "You do not have permission to delete this destination", "add_to_coolify": "Add to Coolify", "coolify_proxy_stopped": "Coolify Proxy stopped!", @@ -250,7 +250,7 @@ "no_destination_found": "No destination found", "new_error_network_already_exists": "Network {{network}} already configured for another team!", "new": { - "saving_and_configuring_proxy": "Saving and configuring proxy...", + "saving_and_configuring_proxy": "Saving...", "install_proxy": "This will install a proxy on the destination to allow you to access your applications and services without any manual configuration (recommended for Docker).

Databases will have their own proxy.", "add_new_destination": "Add New Destination", "predefined_destinations": "Predefined destinations" @@ -267,7 +267,7 @@ "official_providers": "Official providers" }, "no_git_sources_found": "No git sources found", - "delete_git_source": "Delete Git Source", + "delete_git_source": "Delete", "permission_denied": "You do not have permission to delete a Git Source", "create_new_app": "Create new {{name}} App", "change_app_settings": "Change {{name}} App Settings", @@ -293,20 +293,20 @@ "generate_www_non_www_ssl": "It will generate certificates for both www and non-www.
You need to have both DNS entries set in advance.

Service needs to be restarted." }, "service": { - "stop_service": "Stop Service", + "stop_service": "Stop", "permission_denied_stop_service": "You do not have permission to stop the service.", - "start_service": "Start Service", + "start_service": "Start", "permission_denied_start_service": "You do not have permission to start the service.", - "delete_service": "Delete Service", + "delete_service": "Delete", "permission_denied_delete_service": "You do not have permission to delete a service.", "no_service": "No services found", - "logs": "Service Logs" + "logs": "Logs" }, "setting": { "change_language": "Change Language", "permission_denied": "You do not have permission to do this. \\nAsk an admin to modify your permissions.", "domain_removed": "Domain removed", - "ssl_explainer": "If you specify https, Coolify will be accessible only over https. SSL certificate will be generated for you.
If you specify www, Coolify will be redirected (302) from non-www and vice versa.

WARNING: If you change an already set domain, it will brake webhooks and other integrations! You need to manually update them.", + "ssl_explainer": "If you specify https, Coolify will be accessible only over https. SSL certificate will be generated for you.
If you specify www, Coolify will be redirected (302) from non-www and vice versa.

WARNING: If you change an already set domain, it will break webhooks and other integrations! You need to manually update them.", "must_remove_domain_before_changing": "Must remove the domain before you can change this setting.", "registration_allowed": "Registration allowed?", "registration_allowed_explainer": "Allow further registrations to the application.
It's turned off after the first registration.", @@ -314,7 +314,7 @@ "credential_stat_explainer": "Credentials for stats page.", "auto_update_enabled": "Auto update enabled?", "auto_update_enabled_explainer": "Enable automatic updates for Coolify. It will be done automatically behind the scenes, if there is no build process running.", - "generate_www_non_www_ssl": "It will generate certificates for both www and non-www.
You need to have both DNS entries set in advance.", + "generate_www_non_www_ssl": "It will generate certificates for both www and non-www.
You need to have both DNS entries set in advance.", "is_dns_check_enabled": "DNS check enabled?", "is_dns_check_enabled_explainer": "You can disable DNS check before creating SSL certificates.

Turning it off is useful when Coolify is behind a reverse proxy or tunnel." }, diff --git a/apps/ui/src/lib/locales/fr.json b/apps/ui/src/lib/locales/fr.json index 242ac6fce..418f72526 100644 --- a/apps/ui/src/lib/locales/fr.json +++ b/apps/ui/src/lib/locales/fr.json @@ -50,7 +50,7 @@ "delete_application": "Supprimer l'application", "deployment_queued": "Déploiement en file d'attente.", "destination": "Destination", - "directory_to_use_explainer": "Répertoire à utiliser comme base pour toutes les commandes.
Pourrait être utile avec monorepos.", + "directory_to_use_explainer": "Répertoire à utiliser comme base pour toutes les commandes.
Pourrait être utile avec monorepos.", "dns_not_set_error": "DNS non défini ou propagé pour {{domain}}.

Veuillez vérifier vos paramètres DNS.", "dns_not_set_partial_error": "DNS non défini", "domain_already_in_use": "Le domaine {{domain}} est déjà utilisé.", @@ -65,7 +65,7 @@ "features": "Caractéristiques", "git_repository": "Dépôt Git", "git_source": "Source Git", - "https_explainer": "Si vous spécifiez https, l'application sera accessible uniquement via https. \nUn certificat SSL sera généré pour vous.
Si vous spécifiez www, l'application sera redirigée (302) à partir de non-www et vice versa \n.

Pour modifier le domaine, vous devez d'abord arrêter l'application.

Vous devez configurer, en avance, votre DNS pour pointer vers l'IP du serveur.", + "https_explainer": "Si vous spécifiez https, l'application sera accessible uniquement via https. \nUn certificat SSL sera généré pour vous.
Si vous spécifiez www, l'application sera redirigée (302) à partir de non-www et vice versa \n.

Pour modifier le domaine, vous devez d'abord arrêter l'application.

Vous devez configurer, en avance, votre DNS pour pointer vers l'IP du serveur.", "install_command": "Commande d'installation", "logs": "Journaux des applications", "no_applications_found": "Aucune application trouvée", @@ -78,11 +78,11 @@ "need_during_buildtime": "Besoin pendant la build ?", "no_previews_available": "Aucun aperçu disponible", "redeploy": "Redéployer", - "setup_secret_app_first": "Vous pouvez ajouter des secrets aux déploiements PR/MR. \nVeuillez d'abord ajouter des secrets à l'application. \n
Utile pour créer des environnements de mise en scène.", - "values_overwriting_app_secrets": "Ces valeurs remplacent les secrets d'application dans les déploiements PR/MR. \nUtile pour créer des environnements de mise en scène." + "setup_secret_app_first": "Vous pouvez ajouter des secrets aux déploiements PR/MR. \nVeuillez d'abord ajouter des secrets à l'application. \n
Utile pour créer des environnements de mise en scène.", + "values_overwriting_app_secrets": "Ces valeurs remplacent les secrets d'application dans les déploiements PR/MR. \nUtile pour créer des environnements de mise en scène." }, "previews": "Aperçus", - "publish_directory_explainer": "Répertoire contenant tous les actifs à déployer. \n
Par exemple : dist,_site ou public.", + "publish_directory_explainer": "Répertoire contenant tous les actifs à déployer. \n
Par exemple : dist,_site ou public.", "rebuild_application": "Re-build l'application", "secret": "secrets", "secrets": { @@ -91,7 +91,7 @@ "use_isbuildsecret": "Utiliser isBuildSecret" }, "settings_saved": "Paramètres sauvegardés.", - "ssl_explainer": "Il générera des certificats pour www et non-www. \n
Vous devez avoir les deux entrées DNS définies à l'avance.

Utile si vous prévoyez d'avoir des visiteurs sur les deux.", + "ssl_explainer": "Il générera des certificats pour www et non-www. \n
Vous devez avoir les deux entrées DNS définies à l'avance.

Utile si vous prévoyez d'avoir des visiteurs sur les deux.", "ssl_www_and_non_www": "Générer SSL pour www et non-www ?", "start_command": "Démarrer la commande", "stop_application": "Arrêter l'application", @@ -181,7 +181,7 @@ "path": "Chemin", "port": "Port", "public_port_range": "Gamme de ports publics", - "public_port_range_explainer": "Ports utilisés pour exposer les bases de données/services/services internes.
Ajoutez-les à votre pare-feu (le cas échéant).

Vous pouvez spécifier une plage de ports, par exemple : 9000-9100", + "public_port_range_explainer": "Ports utilisés pour exposer les bases de données/services/services internes.
Ajoutez-les à votre pare-feu (le cas échéant).

Vous pouvez spécifier une plage de ports, par exemple : 9000-9100", "publish_directory": "Publier le répertoire", "remove": "Retirer", "remove_domain": "Supprimer le domaine", @@ -266,7 +266,7 @@ "permission_denied": "Vous n'avez pas la permission de faire cela. \n\\nDemandez à un administrateur de modifier vos autorisations.", "registration_allowed": "Inscription autorisée ?", "registration_allowed_explainer": "Autoriser d'autres inscriptions à l'application. \n
Il est désactivé après la première inscription.", - "ssl_explainer": "Si vous spécifiez https, Coolify sera accessible uniquement via https. \nUn certificat SSL sera généré pour vous.
Si vous spécifiez www, Coolify sera redirigé (302) à partir de non-www et vice versa." + "ssl_explainer": "Si vous spécifiez https, Coolify sera accessible uniquement via https. \nUn certificat SSL sera généré pour vous.
Si vous spécifiez www, Coolify sera redirigé (302) à partir de non-www et vice versa." }, "source": { "application_id": "ID d'application", diff --git a/apps/ui/src/lib/store.ts b/apps/ui/src/lib/store.ts index 245e8d6eb..f1c5d05bf 100644 --- a/apps/ui/src/lib/store.ts +++ b/apps/ui/src/lib/store.ts @@ -1,6 +1,8 @@ -import { writable, readable, type Writable, type Readable } from 'svelte/store'; +import { writable, readable, type Writable } from 'svelte/store'; interface AppSession { + ipv4: string | null, + ipv6: string | null, version: string | null, userId: string | null, teamId: string | null, @@ -15,8 +17,15 @@ interface AppSession { gitlab: string | null, } } +interface AddToast { + type?: "info" | "success" | "error", + message: string, + timeout?: number | undefined + } export const loginEmail: Writable = writable() export const appSession: Writable = writable({ + ipv4: null, + ipv6: null, version: null, userId: null, teamId: null, @@ -31,7 +40,6 @@ export const appSession: Writable = writable({ gitlab: null } }); -export const isTraefikUsed: Writable = writable(false); export const disabledButton: Writable = writable(false); export const status: Writable = writable({ application: { @@ -41,14 +49,16 @@ export const status: Writable = writable({ initialLoading: true }, service: { - initialLoading: true, + isRunning: false, + isExited: false, loading: false, - isRunning: false + initialLoading: true }, database: { - initialLoading: true, + isRunning: false, + isExited: false, loading: false, - isRunning: false + initialLoading: true } }); @@ -60,14 +70,41 @@ export const features = readable({ export const location: Writable = writable(null) export const setLocation = (resource: any) => { - console.log(GITPOD_WORKSPACE_URL) if (GITPOD_WORKSPACE_URL && resource.exposePort) { const { href } = new URL(GITPOD_WORKSPACE_URL); const newURL = href .replace('https://', `https://${resource.exposePort}-`) .replace(/\/$/, ''); - location.set(newURL) - } else { - 
location.set(resource.fqdn) + return location.set(newURL) + } else if (CODESANDBOX_HOST){ + const newURL = `https://${CODESANDBOX_HOST.replace(/\$PORT/,resource.exposePort)}` + return location.set(newURL) } + return location.set(resource.fqdn) +} + +export const toasts: any = writable([]) + +export const dismissToast = (id: number) => { + toasts.update((all: any) => all.filter((t: any) => t.id !== id)) +} + +export const addToast = (toast: AddToast) => { + // Create a unique ID so we can easily find/remove it + // if it is dismissible/has a timeout. + const id = Math.floor(Math.random() * 10000) + + // Setup some sensible defaults for a toast. + const defaults = { + id, + type: 'info', + timeout: 2000, + } + + // Push the toast to the top of the list of toasts + const t = { ...defaults, ...toast } + toasts.update((all: any) => [t, ...all]) + + // If toast is dismissible, dismiss it after "timeout" amount of time. + if (t.timeout) setTimeout(() => dismissToast(id), t.timeout) } \ No newline at end of file diff --git a/apps/ui/src/routes/__layout.svelte b/apps/ui/src/routes/__layout.svelte index a6932eae0..e808f555a 100644 --- a/apps/ui/src/routes/__layout.svelte +++ b/apps/ui/src/routes/__layout.svelte @@ -65,11 +65,12 @@ -
@@ -62,7 +94,7 @@
Preview Deployments
- {application.name} + {application?.name}
{#if application.gitSource?.htmlUrl && application.repository && application.branch} {/if}
-
-
- Useful for creating staging environments." - : "These values overwrite application secrets in PR/MR deployments.
Useful for creating staging environments."} - /> -
- {#if applicationSecrets.length !== 0} - - - - - - - - - - - {#each applicationSecrets as secret} - {#key secret.id} - - s.name === secret.name)} - isPRMRSecret - name={secret.name} - value={secret.value} - isBuildSecret={secret.isBuildSecret} - on:refresh={refreshSecrets} - /> - - {/key} - {/each} - -
{$t('forms.name')}{$t('forms.value')}{$t('application.preview.need_during_buildtime')}{$t('forms.action')}
- {/if} -
- -
-
- {#if containers.length > 0} - {#each containers as container} - -
-
{getDomain(container.fqdn)}
-
-
-
- -
- {/each} - {:else} -
-
- {$t('application.preview.no_previews_available')} -
-
+{#if loading.init} + +{:else} +
+
+ Useful for creating staging environments." + : "These values overwrite application secrets in PR/MR deployments.
Useful for creating staging environments."} + /> +
+ {#if applicationSecrets.length !== 0} + + + + + + + + + + + {#each applicationSecrets as secret} + {#key secret.id} + + s.name === secret.name)} + isPRMRSecret + name={secret.name} + value={secret.value} + isBuildSecret={secret.isBuildSecret} + on:refresh={refreshSecrets} + /> + + {/key} + {/each} + +
{$t('forms.name')}{$t('forms.value')}{$t('application.preview.need_during_buildtime')}{$t('forms.action')}
{/if}
-
+ +
+
+ {#if containers.length > 0} + {#each containers as container} + +
+
{getDomain(container.fqdn)}
+
+
+
+ +
+
+ +
+ {/each} + {:else} +
+
+ {$t('application.preview.no_previews_available')} +
+
+ {/if} +
+
+{/if} diff --git a/apps/ui/src/routes/applications/[id]/secrets.svelte b/apps/ui/src/routes/applications/[id]/secrets.svelte index f5c64ea81..39f6ee3e9 100644 --- a/apps/ui/src/routes/applications/[id]/secrets.svelte +++ b/apps/ui/src/routes/applications/[id]/secrets.svelte @@ -27,7 +27,7 @@ import { t } from '$lib/translations'; import { get } from '$lib/api'; import { saveSecret } from './utils'; - import { toast } from '@zerodevx/svelte-toast'; + import { addToast } from '$lib/store'; const limit = pLimit(1); const { id } = $page.params; @@ -59,7 +59,10 @@ ); batchSecrets = ''; await refreshSecrets(); - toast.push('Secrets saved.'); + addToast({ + message: 'Secrets saved.', + type: 'success' + }); } @@ -147,9 +150,6 @@

Paste .env file