This commit is contained in:
baiobelfer 2025-03-19 08:34:19 +01:00
commit 1235ef5507
5 changed files with 991 additions and 0 deletions

2
conf/.tmux.conf Normal file
View File

@ -0,0 +1,2 @@
# Use Ctrl-a instead of the default Ctrl-b as the tmux prefix key.
set-option -g prefix C-a
# Let programs inside tmux write to the system clipboard (OSC 52 passthrough).
set -s set-clipboard on

181
conf/.vimrc Normal file
View File

@ -0,0 +1,181 @@
" Plugin declarations managed by vim-plug (run :PlugInstall after changes).
call plug#begin()
Plug 'tpope/vim-fugitive'                               " Git integration
Plug 'tpope/vim-commentary'                             " Comment toggling (gc)
Plug 'preservim/nerdtree', { 'on': 'NERDTreeToggle' }
Plug 'neoclide/coc.nvim', {'branch': 'release'}
Plug 'jpalardy/vim-slime'                               " Send text to a REPL
Plug 'ojroques/vim-oscyank', {'branch': 'main'}         " Clipboard via OSC 52
call plug#end()
" NERDTree mappings: focus, open, toggle, and reveal the current file.
nnoremap <leader>n :NERDTreeFocus<CR>
nnoremap <C-n> :NERDTree<CR>
nnoremap <C-t> :NERDTreeToggle<CR>
nnoremap <C-f> :NERDTreeFind<CR>
" Make backspace delete over indents, line breaks, and insert start.
set backspace=indent,eol,start
" Limit mouse use ('r' = only for hit-enter / more prompts).
set mouse=r
" Built-in GDB frontend; wide layout, using a RISC-V cross-gdb binary.
packadd termdebug
let g:termdebug_wide=1
let g:termdebugger='riscv32-unknown-elf-gdb'
" ---- General editor options ----
" Disable compatibility with vi which can cause unexpected issues.
set nocompatible
" Enable type file detection. Vim will be able to try to detect the type of file in use.
filetype on
" Enable plugins and load plugin for the detected file type.
filetype plugin on
" Load an indent file for the detected file type.
filetype indent on
" Turn syntax highlighting on.
syntax on
" Add numbers to each line on the left-hand side.
set number
" Highlight cursor line underneath the cursor horizontally.
set cursorline
" Highlight cursor line underneath the cursor vertically.
"set cursorcolumn
" Set shift width to 4 spaces.
set shiftwidth=4
" Set tab width to 4 columns.
set tabstop=4
" Use space characters instead of tabs.
set expandtab
" Do not save backup files.
set nobackup
" Do not let cursor scroll below or above N number of lines when scrolling.
set scrolloff=10
" Do not wrap lines. Allow long lines to extend as far as the line goes.
set nowrap
" While searching through a file, incrementally highlight matching characters as you type.
set incsearch
" Ignore capital letters during search.
set ignorecase
" Override the ignorecase option if searching for capital letters.
" This will allow you to search specifically for capital letters.
set smartcase
" Show partial command you type in the last line of the screen.
set showcmd
" Show the mode you are on the last line.
set showmode
" Show matching words during a search.
set showmatch
" Use highlighting when doing a search.
set hlsearch
" Set the commands to save in history (default number is 20).
set history=1000
" Enable auto completion menu after pressing TAB.
set wildmenu
" Make wildmenu behave similarly to Bash completion.
set wildmode=list:longest
" There are certain files that we would never want to edit with Vim.
" Wildmenu will ignore files with these extensions.
set wildignore=*.docx,*.jpg,*.png,*.gif,*.pdf,*.pyc,*.exe,*.flv,*.img,*.xlsx
" set colorscheme OceanicNext
" vim-slime: default to IPython-style paste handling for Python,
" unless the user has already configured it.
if !exists("g:slime_python_ipython")
let g:slime_python_ipython = 1
endif
" Send text to a tmux pane.
let g:slime_target = "tmux"
" Run cell for vim-slime
" Send the current cell — the region between two lines matching a:pattern
" (e.g. '^#%%') — to the configured slime target.
function! SendCell(pattern)
    " First line of the cell: just below the previous marker, else line 1.
    let l:first = search(a:pattern, 'bnW')
    let l:first = l:first ? l:first + 1 : 1
    " Last line of the cell: just above the next marker, else end of buffer.
    let l:last = search(a:pattern, 'nW')
    let l:last = l:last ? l:last - 1 : line('$')
    call slime#send_range(l:first, l:last)
endfunction
" Custom vim-slime mappings
" Disable slime's default mappings; define our own below.
let g:slime_no_mappings = 1
" Visual mode: send the selected region to the REPL.
xmap <c-c><c-c> <Plug>SlimeRegionSend
" Normal mode: send the surrounding '#%%' cell via SendCell().
nmap <c-c><c-c> :<c-u>call SendCell('^#%%')<cr>
" Open slime's target configuration prompt.
nmap <c-c>v <Plug>SlimeConfig
" vim-oscyank: copy through OSC 52 escapes (works over SSH / inside tmux).
nmap <leader>c <Plug>OSCYankOperator
" Yank the current line.
nmap <leader>cc <leader>c_
vmap <leader>c <Plug>OSCYankVisual
" Fallback for vim-oscyank: when Vim has no working clipboard, hook
" TextYankPost so a plain `y` still copies through OSC 52.
if (!has('nvim') && !has('clipboard_working'))
" In the event that the clipboard isn't working, it's quite likely that
" the + and * registers will not be distinct from the unnamed register. In
" this case, a:event.regname will always be '' (empty string). However, it
" can be the case that `has('clipboard_working')` is false, yet `+` is
" still distinct, so we want to check them all.
let s:VimOSCYankPostRegisters = ['', '+', '*']
" Forward qualifying yank events to OSCYankRegister.
function! s:VimOSCYankPostCallback(event)
if a:event.operator == 'y' && index(s:VimOSCYankPostRegisters, a:event.regname) != -1
call OSCYankRegister(a:event.regname)
endif
endfunction
augroup VimOSCYankPost
autocmd!
autocmd TextYankPost * call s:VimOSCYankPostCallback(v:event)
augroup END
endif
" Look for a tags file next to the current file, then upward toward the root.
set tags=./tags;,tags;
" Map Ctrl-[ to go back in the tag stack
" Function to display filename aligned to the right
" Echo the absolute path of the current file, left-padded with spaces so it
" appears right-aligned in the command area (2 columns of margin).
function! ShowFileNameRight()
    let l:path = expand('%:p')
    let l:pad = &columns - len(l:path) - 2
    echo repeat(' ', max([l:pad, 0])) . l:path
endfunction
" Autocommand to display filename in command area when entering command mode
" autocmd CmdlineEnter * call ShowFileNameRight()

2
conf/__vim-plug Normal file
View File

@ -0,0 +1,2 @@
# Install the vim-plug plugin manager into ~/.vim/autoload (creates dirs).
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim

402
owrt/incus-main.tex Normal file
View File

@ -0,0 +1,402 @@
\documentclass{article}
\usepackage[margin=2cm]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{polski}
\usepackage[english]{babel}
\usepackage{listings}
\lstset{
literate=%
{ą}{{\k{a}}}1
{ć}{{\'c}}1
{ę}{{\k{e}}}1
{ł}{{\l{}}}1
{ń}{{\'n}}1
{ó}{{\'o}}1
{ś}{{\'s}}1
{ź}{{\'z}}1
{ż}{{\.z}}1
{Ą}{{\k{A}}}1
{Ć}{{\'C}}1
{Ę}{{\k{E}}}1
{Ł}{{\L{}}}1
{Ń}{{\'N}}1
{Ó}{{\'O}}1
{Ś}{{\'S}}1
{Ź}{{\'Z}}1
{Ż}{{\.Z}}1,
}
\usepackage{xcolor}
\lstset{
backgroundcolor=\color{lightgray},
basicstyle=\ttfamily,
breaklines=true,
breakatwhitespace=true,
% prebreak=\raisebox{0ex}[0ex][0ex]{\ensuremath{\hookleftarrow}},
prebreak={\textbackslash}, % Changed from \raisebox{0ex}[0ex][0ex]{\ensuremath{\hookleftarrow}} to \textbackslash
frame=single,
% showtabs=false,
% showspaces=false,
showstringspaces=false,
keywordstyle=\color{blue},
commentstyle=\color{gray},
}
% % Define custom colors
% \definecolor{vscodePurple}{rgb}{0.5, 0.0, 0.5} % Function names, keywords
% \definecolor{vscodeBlue}{rgb}{0.16, 0.32, 0.75} % Comments
% \definecolor{vscodeGreen}{rgb}{0, 0.6, 0} % Strings
% % Configuration for the listings package
% \lstset{
% basicstyle=\ttfamily,
% columns=fullflexible,
% keywordstyle=\color{vscodePurple},
% stringstyle=\color{vscodeGreen},
% commentstyle=\color{vscodeBlue},
% morecomment=[l][\color{magenta}]{\#},
% frame=single,
% language=Python,
% showstringspaces=false,
% breaklines=true, % Enables line breaking
% postbreak=\mbox{\textcolor{red}{$\hookrightarrow$}\space}, % Marks where a line has been broken
% }
\title{Creating a Custom incus Image}
\author{\textcolor{red}{Draft}}
\date{}
\begin{document}
\maketitle
\section{Introduction}
This document describes the process of creating a custom incus image for Debian system, which includes additional packages.
\section{Requirements}
The following are needed to create a custom image:
\begin{itemize}
\item incus installed
\item Internet access
\item Basic knowledge of package management in Debian
\end{itemize}
\section{Creating a Base Container}
The first step is to create a temporary Debian container that will serve as a base:
\begin{lstlisting}[language=bash]
incus launch images:debian/12 deb
incus profile device remove default eth0
incus network attach incusbr0 deb eth0 eth0
\end{lstlisting}
\newpage
\section{firewall}
\textcolor{red}{... text ...}
\begin{lstlisting}[language=bash]
sudo iptables -t nat -A POSTROUTING -s 10.127.194.1/24 -o eth0 -j MASQUERADE
sudo iptables -A FORWARD -s 10.127.194.1/24 -o eth0 -j ACCEPT
sudo iptables -A FORWARD -d 10.127.194.1/24 -m state --state ESTABLISHED,RELATED -i eth0 -j ACCEPT
\end{lstlisting}
\textcolor{red}{bash cmd script}
\begin{lstlisting}
f() { \
# Sprawdź, czy parametr został podany \
if [ -z "$1" ]; then \
echo "Użycie: f <nazwa_interfejsu>"; \
return 1; \
fi; \
\
# Nazwa interfejsu sieciowego przekazana jako parametr \
INTERFACE="$1"; \
\
# Odczytaj adres IP przypisany do incusbr0 \
IP_ADDR=$(ip -4 addr show incusbr0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}'); \
\
# Oblicz sieć na podstawie adresu IP \
NETWORK=$(echo $IP_ADDR | sed 's/\.[0-9]*$/.0\/24/'); \
\
# Dodaj reguły iptables \
sudo iptables -t nat -A POSTROUTING -s $NETWORK -o $INTERFACE -j MASQUERADE; \
sudo iptables -A FORWARD -s $NETWORK -o $INTERFACE -j ACCEPT; \
sudo iptables -A FORWARD -d $NETWORK -m state --state ESTABLISHED,RELATED -i $INTERFACE -j ACCEPT; \
\
echo "Reguły iptables zostały dodane dla sieci $NETWORK i interfejsu $INTERFACE."; \
}; f eth0
\end{lstlisting}
\section{Proxy}
The given commands demonstrate how to configure proxy devices within Linux Containers (incus), specifically for forwarding network traffic from the host to containers. This can be useful for a variety of purposes, including exposing a service running inside a container to the outside network.
\begin{lstlisting}
incus config device add incus-owrt0 owrt-proxy proxy listen=tcp:0.0.0.0:1234 connect=tcp:0.0.0.0:80
incus config device add deb deb-proxy proxy listen=tcp:0.0.0.0:1234 connect=tcp:0.0.0.0:1234
\end{lstlisting}
\section{Installing Additional Packages}
Next, use the \texttt{exec} command to launch a shell in the container and install the necessary packages:
\begin{lstlisting}[language=bash]
incus exec deb -- apt update
incus exec deb -- apt install -y \
netplan.io \
sudo vim nano git tmux mc zip unzip curl wget htop lynx\
iproute2 termshark bridge-utils \
python3 python3-ipython python3-pyroute2 python3-scapy \
docker.io docker-compose
\end{lstlisting}
\section{Configuring Users and Permissions}
After installing the additional packages, it's important to configure user access and permissions within the container.
\subsection{Changing the Root Password}
To change the root password to "passroot", execute the following command:
\begin{lstlisting}[language=bash]
echo "root:passroot" | incus exec deb -- chpasswd
\end{lstlisting}
\subsection{Adding a New User}
To add a new user named "user" with the password "pass", and to add this user to the "sudo" and "docker" groups, follow these steps:
\begin{lstlisting}[language=bash]
incus exec deb -- useradd -m -s /bin/bash user
echo "user:pass" | incus exec deb -- chpasswd
incus exec deb -- usermod -aG sudo user
incus exec deb -- usermod -aG docker user
\end{lstlisting}
This series of commands creates a new user with a home directory and bash shell, sets their password, and adds them to the necessary groups for system administration and Docker management.
\section{Configuring Vim and Tmux inside the Container}
To install Vim-Plug, a plugin manager for Vim, inside the `deb` container, execute the following command from your host system:
\begin{lstlisting}[language=bash]
incus exec deb -- bash -c "curl -fLo /home/user/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim"
\end{lstlisting}
\noindent
Ensure that the specified user directory exists and the user has the necessary permissions to write to it.
\noindent
To transfer a file from your host system to a container, use the `incus file push` command. For example, to push `\_confs.zip` to the `deb` container:
\begin{lstlisting}[language=bash]
incus file push _confs.zip deb/home/user/_confs.zip
#Folder _confs must be zipped with the -rj option
incus exec deb -- bash -c "unzip /home/user/_confs.zip -d /home/user/ && sudo chown -R user:user /home/user/* /home/user/.*"
\end{lstlisting}
\section{Cleaning the Container}
Before creating the image, clean the system of unnecessary files:
\begin{lstlisting}[language=bash]
incus exec deb -- apt clean
incus exec deb -- apt autoremove
\end{lstlisting}
\section{Creating the Image}
After installing all the necessary packages and cleaning the system, create an image from the container:
\begin{lstlisting}[language=bash]
incus stop deb
incus publish deb --alias my-custom-image-deb
\end{lstlisting}
Replace \texttt{my-custom-image} with the chosen name for your image.
\section{Cleanup}
After creating the image, delete the temporary container:
\begin{lstlisting}[language=bash]
incus delete deb
\end{lstlisting}
\section{Using the Image}
Now you can use your custom image to create new containers:
\begin{lstlisting}[language=bash]
incus launch my-custom-image-deb deb
sudo incus network attach incusbr0 deb eth0 eth0
incus project set user-1000 restricted.containers.privilege allow
incus stop deb
incus config set deb security.nesting true
incus config set deb security.privileged true
incus start deb
\end{lstlisting}
\section{Configuring Netplan Interface with DHCP}
This section demonstrates the method to dynamically create and apply a Netplan configuration for setting up a network interface with DHCP on an incus/incus container. The procedure allows for the on-the-fly generation of the configuration file and its immediate application without manually creating or transferring the file.
\subsection{Creating and Transferring the Configuration}
To configure an interface named \texttt{v1a} with DHCP enabled, you can use shell commands combined with incus/incus functionalities. The following commands illustrate how to create the configuration dynamically and apply it directly:
\subsubsection{Using the \texttt{echo} Command}
\textcolor{red}{... text ...}
\begin{lstlisting}[language=bash]
f() { \
sudo python3 veth.py \
-ns1 $1 \
-t1 incus \
-n1 va-$3 \
-b1 $2 \
-ns2 $3 \
-t2 incus \
-n2 vb-$3 \
; }; f incus-owrt0 br-lan deb
\end{lstlisting}
\subsubsection{Using the \texttt{echo} Command}
This Bash function allows you to dynamically create a Netplan configuration file within an incus container. The function takes a single parameter: \texttt{\$1} is the name of the container, and the same value is reused to derive both the interface name (\texttt{vb-\$1}) and the Netplan configuration filename (\texttt{vb-\$1.yaml}). The configuration enables DHCP for the specified interface.
\begin{lstlisting}[language=bash]
f() { incus exec "$1" -- bash -c "echo \"
network:
version: 2
ethernets:
vb-$1:
dhcp4: true\" > /etc/netplan/vb-$1.yaml"; }; f deb
\end{lstlisting}
To use this command, replace `deb` with the actual name of your incus container. This command creates a Netplan configuration file in the `\texttt{/etc/netplan}` directory of the specified container, with the filename derived from the container name (`\texttt{vb-deb.yaml}` in this example).
\subsubsection{Applying the Configuration with Netplan}
After creating the Netplan configuration file, apply the changes to activate the network settings using the following command. This command uses the container name provided as the first argument \texttt{\$1} to specify which incus container to apply the Netplan configuration in.
\begin{lstlisting}[language=bash]
f() { incus exec "$1" -- sudo netplan apply; }; f deb
\end{lstlisting}
This command ensures that the newly created Netplan configuration is applied, enabling the network interface with DHCP as specified in the configuration file. Replace `deb` with the actual name of your incus container when executing the command.
\subsubsection{Checking Netplan Status Inside the Container}
To verify the application of the Netplan configuration within an incus container, you could use a `netplan status` command. This example uses the container name provided as the first argument \texttt{\$1}.
\begin{lstlisting}[language=bash]
f() { incus exec "$1" -- sudo netplan status; }; f deb
\end{lstlisting}
Replace `deb` with the actual name of your incus container.
\subsubsection{Cat Netplan conf file}
\textcolor{red}{... text ...}
\begin{lstlisting}[language=bash]
f() { incus exec $1 -- cat /etc/netplan/vb-$1.yaml;} ; f deb
\end{lstlisting}
\section{Exporting and Importing the Image}
To share the custom image with another incus host, you need to export and then import the image.
\subsection{Exporting the Image}
Export the image to a file:
\begin{lstlisting}[language=bash]
incus image export my-custom-image ./my-custom-image
\end{lstlisting}
\subsection{Transferring the Image}
Transfer the image file to the target incus host using scp or another file transfer method:
\begin{lstlisting}[language=bash]
//scp ./my-custom-image.tar.gz user@target-host:/path/to/directory
incus file push ./my-custom-image.tar.gz deb/home/user/my-custom-image.tar.gz
\end{lstlisting}
\subsection{Importing the Image on the Target Host}
On the target incus host, import the image from the file:
\begin{lstlisting}[language=bash]
incus image import /path/to/directory/my-custom-image.tar.gz --alias my-custom-image
\end{lstlisting}
\newpage
\textcolor{red}{--- ToDo ---}
\section{Creating and Using an incus Image Repository}
After customizing and publishing your incus image, you may want to share it across different hosts. An efficient way to do this is by creating an incus image repository and uploading your image there. This section outlines how to upload a custom image to a repository and then download it on a different host.
\subsection{Setting Up an incus Image Repository}
First, you need to set up an incus image repository. This could be a private server or a service that supports the incus image server protocol. For the sake of this example, we'll assume you have access to a server that can act as an incus image repository.
\subsection{Uploading the Custom Image to the Repository}
To upload your custom image to the repository, you will need to export it from your local incus, then upload it to your repository server. You can use SCP or any file transfer method preferred. Here's how to export and transfer the image:
\begin{verbatim}
incus image export my-custom-image ./my-custom-image.tar.gz
scp ./my-custom-image.tar.gz user@repository-server:/path/to/repository
\end{verbatim}
Replace `my-custom-image` with your image's alias, `user` with your username on the repository server, and `repository-server` with the server's address.
\subsection{Importing the Image on the Repository Server}
Log in to your repository server, and import the image into the incus image repository:
\begin{verbatim}
incus image import /path/to/repository/my-custom-image.tar.gz --alias my-custom-image
\end{verbatim}
\subsection{Accessing the Image from Another Host}
On any other incus host that you want to use the custom image, you first need to add the repository server as a remote incus server:
\begin{verbatim}
incus remote add my-repo repository-server-url
\end{verbatim}
Replace `my-repo` with a name for the remote repository and `repository-server-url` with the actual URL or IP address of your repository server.
Now, you can launch a new container using the custom image from the repository:
\begin{verbatim}
incus launch my-repo:my-custom-image my-new-container
\end{verbatim}
\subsection{Reconfiguring incus for Image Sharing}
If you have already initialized incus on your server and need to adjust its configuration to share images, you may need to reconfigure certain settings. This can be necessary to enable image sharing or to adjust network settings for remote access. Here's how you can modify the incus configuration:
\begin{verbatim}
incus config set core.https_address [::]:8443
incus config set core.trust_password some-secret-password
\end{verbatim}
The first command configures incus to listen for remote connections on all IPv4 and IPv6 addresses on port 8443, which is the default port used by incus for secure remote access. Replace `some-secret-password` with a strong, unique password. This password will be used by remote incus clients to authenticate with your incus server.
\textbf{Note:} If your incus server is behind a firewall or NAT, you may need to set up port forwarding to ensure that remote hosts can connect to your incus server on the specified port (8443 by default).
After reconfiguring incus, you may need to restart the incus service for the changes to take effect. The command to restart incus depends on your system's init system. For systems using systemd, you can use:
\begin{verbatim}
sudo systemctl restart incus
\end{verbatim}
Once incus is configured to accept remote connections and the service has been restarted, you can proceed with uploading images to the repository and accessing them from other hosts as described in the previous sections.
\subsection{Conclusion}
By setting up an incus image repository, you can easily share custom images across multiple hosts. This method simplifies the management of custom images and allows for efficient distribution of containerized environments.
\subsection{Linkownia}
\begin{verbatim}
http://mpabi.pl:1000/vlan/Screen%20recording%202024-03-01%2013.43.59.webm
http://mpabi.pl:1000/vlan/vlan-debs4.tar.gz
\end{verbatim}
\end{document}

404
owrt/main-owrt.tex Normal file
View File

@ -0,0 +1,404 @@
\documentclass{article}
\usepackage[margin=2cm]{geometry}
\usepackage{graphicx} % Add the graphicx package for \reflectbox
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[polish]{babel}
\usepackage{hyperref}
\usepackage[
sortcites,
backend=biber,
hyperref=true,
firstinits=true,
maxbibnames=99,
]{biblatex}
\addbibresource{references.bib}
\title{OpenWRT incus/LXD Container Setup}
\author{
% mp\raisebox{-1.5ex}{\reflectbox{\rotatebox[origin=c]{180}{A}}}b\raisebox{-1.5ex}{\reflectbox{\rotatebox[origin=c]{180}{I}}}
M. Pabiszczak
}
\date{\today} % Date inserted at compile time
\begin{document}
\maketitle
\section*{Comparison between incus and LXD}
Linux Containers (incus) and LXD are both significant technologies in the realm of containerization and virtualization. incus is a pioneering technology that offers operating-system level virtualization through lightweight containers. It allows for running multiple isolated Linux systems (containers) on a single control host. LXD, on the other hand, is an extension of incus, providing a more user-friendly and scalable approach to container management. LXD enhances the capabilities of incus with features like easy storage management, network management, and a REST API for remote control. While LXD builds upon incus, each has its unique use cases and advantages. Below is a comparative analysis of incus and LXD across various features:
\vspace{5mm}
\begin{tabular}{ |p{3cm}|p{6cm}|p{6cm}| }
\hline
\textbf{Feature} & \textbf{incus (Linux Containers)} & \textbf{LXD} \\
\hline
Architecture & Built on Linux kernel features like cgroups and namespaces, offering process and resource isolation. Shares the host's kernel but runs in isolated user spaces. & Built on top of incus, providing a more user-friendly and scalable system with a daemon-based architecture. Adds extra features like a REST API for remote management. \\
\hline
Ease of Use & Known for its simplicity and flexibility in configuration with command-line tools. Requires understanding of Linux namespaces and cgroups. & Provides an easier and more intuitive command-line interface and REST API, making it simpler to use, especially for managing large numbers of containers. \\
\hline
Networking and Storage & Versatile networking options and storage managed using standard Linux filesystems. Containers have their own file system layout. & Inherits incus's capabilities and extends them with improved storage options like ZFS, BTRFS, and LVM, and easier network management. \\
\hline
Security Features & Offers various security features like AppArmor and SELinux support, but not inherently sandboxed. & Enhances security by offering container snapshots, easier transfer of containers with security profiles, and integration with AppArmor and SELinux. \\
\hline
Community and Ecosystem & Strong community and ecosystem with active development. Integrates well with various tools for development and production. & Expands on incus's ecosystem, with support from Canonical (the company behind Ubuntu), ensuring more frequent updates and feature additions. \\
\hline
Use Cases & Suitable for environments where full OS virtualization is unnecessary. Used in CI/CD, development environments, and lightweight application isolation. & Ideal for more extensive container deployments, offering an easier and more scalable container management system. Suitable for both on-premises and cloud environments. \\
\hline
\end{tabular}
\newpage
\section{Finding and Using the Latest OpenWRT Snapshot in LXD}
\subsection{Creating and Configuring a Network Bridge}
\begin{enumerate}
\item \textbf{Creating a New Network Bridge}:\\
To create a new network bridge named \texttt{incusbr0} in LXD, execute the following command:
\begin{verbatim}
sudo incus network create incusbr0
\end{verbatim}
This command creates the network bridge \texttt{incusbr0}. After its creation, you can use this bridge for various networking purposes in LXD.
To delete the network bridge named \texttt{incusbr0} that you have created, use the following command:
\begin{verbatim}
sudo incus network delete incusbr0
\end{verbatim}
This command will remove the network bridge \texttt{incusbr0} from LXD.
\item \textbf{Displaying Information About the Created Network}:\\
To display detailed information about the created network \texttt{incusbr0}, use the following command:
\begin{verbatim}
sudo incus network show incusbr0
\end{verbatim}
This command will provide details about the \texttt{incusbr0} network configuration.
\item \textbf{Attaching and Detaching a Container from a Network Bridge}:\\
To attach a container named \texttt{owrt0} to a network bridge \texttt{incusbr0} and then detach it, use the following commands:
\begin{verbatim}
sudo incus network attach incusbr0 owrt0 eth0 eth0
sudo incus network detach incusbr0 owrt0 eth0
\end{verbatim}
The first command attaches the container \texttt{owrt0} to the network bridge \texttt{incusbr0}, while the second command detaches it from the bridge.
To display the network device configuration of a specific container such as \texttt{owrt0}, use:
\begin{verbatim}
sudo incus config device show owrt0
\end{verbatim}
The output will show the network devices and their configurations for the \texttt{owrt0} container. For example:
\begin{verbatim}
eth0:
name: eth0
network: incusbr0
type: nic
\end{verbatim}
\item \textbf{Adding the New Network Bridge to the Default Profile}:\\
After creating \texttt{incusbr0}, you can add it as a network interface to the default profile using this command:
\begin{verbatim}
sudo incus profile device add default eth0 nic nictype=bridged parent=incusbr0
\end{verbatim}
This adds the bridge \texttt{incusbr0} to the default profile. LXD containers that use the default profile will by default be connected to the newly created bridge \texttt{incusbr0}.
To remove the network interface \texttt{eth0}, which is associated with the \texttt{incusbr0} network bridge, from the default profile, use this command:
\begin{verbatim}
sudo incus profile device remove default eth0
\end{verbatim}
This command removes the network interface \texttt{eth0} from the default profile, disconnecting it from the \texttt{incusbr0} bridge.
\item \textbf{Displaying Information About the Default Profile}:\\
To display detailed information about the default profile in LXD, use the following command:
\begin{verbatim}
sudo incus profile show default
\end{verbatim}
This command will provide details about the \texttt{default} profile configurations.
\end{enumerate}
\subsection{Listing Available Images}
You can view all images in your local LXD store to verify the import. The command below displays a list of images with details like size, upload date, and aliases.
\begin{verbatim}
sudo incus image list
\end{verbatim}
\subsection{Deleting an Image}
If needed, you can remove an image from your local LXD store. Use the following command with either the image's alias or its fingerprint. Remember, this action is irreversible, so make sure to delete the correct image.
\begin{verbatim}
sudo incus image delete [alias_or_fingerprint]
\end{verbatim}
\subsection{Listing OpenWRT Images}
Use the command below to list all available OpenWRT images in the default LXD image repository. This helps you find the desired architecture and version, like `amd64` and the latest release date.
\begin{verbatim}
sudo incus image list images: openwrt
\end{verbatim}
\subsection{Importing the Snapshot Image}
For using a specific OpenWRT snapshot, such as `Openwrt snapshot amd64`, import it into your local LXD image store. This can be done with the following command, which assigns an easy-to-remember alias to the imported image.
\begin{verbatim}
sudo incus image copy images:openwrt/snapshot/amd64 local: --alias openwrt-latest
\end{verbatim}
\subsection{Launching an OpenWRT Container}
Finally, to create and start a new container using the imported OpenWRT image, use this command. It creates a container named \texttt{owrt0} with the OpenWRT image you imported.
\begin{verbatim}
sudo incus launch openwrt-latest owrt0
\end{verbatim}
To list all the currently available LXD containers, including the newly created \texttt{owrt0} container, use the following command:
\begin{verbatim}
sudo incus ls
\end{verbatim}
\subsection{Recreating the OpenWRT Container}
To recreate the OpenWRT container named "owrt0", first stop and destroy the existing container, then recreate it using the same snapshot version. The commands to execute are as follows:
\begin{verbatim}
sudo incus stop owrt0
sudo incus delete owrt0
sudo incus launch openwrt-latest owrt0
\end{verbatim}
\subsection{Attaching a Console to the Container}
To access the interactive console of an LXD container, you can use the \texttt{incus exec} command. This command allows you to run commands inside the container or to access its interactive shell.
\paragraph{Starting an Interactive Console Session:}
To start an interactive console session in an LXD container, use the \texttt{incus exec} command followed by the container's name, then \texttt{--}, and \texttt{bash} or \texttt{ash}, depending on what is available in the container. For example:
\begin{verbatim}
sudo incus exec owrt0 -- ash
\end{verbatim}
This command will start an interactive bash session in the container named \texttt{owrt0}.
\paragraph{Executing a Single Command:}
You can also use \texttt{incus exec} to execute a single command inside the container. For instance, to check the system version inside the container, you can use:
\begin{verbatim}
sudo incus exec owrt0 -- uname -a
\end{verbatim}
This command executes \texttt{uname -a} inside the \texttt{owrt0} container and displays the output.
\section{Configuring Proxy Devices in LXD Containers}
Proxy devices in LXD allow network connections to be forwarded between the host and the containers. This can be particularly useful for exposing services running within a container to the outside network or redirecting traffic from the host to the container.
\subsection{Adding a Proxy Device}
To add a proxy device to an LXD container, you can use the \texttt{incus config device add} command. For example, to forward traffic from port 1234 on the host to port 80 in the container named \texttt{owrt0}, the command would be:
\begin{verbatim}
incus config device add owrt0 owrt-proxy proxy listen=tcp:0.0.0.0:1234 \
connect=tcp:127.0.0.1:80
\end{verbatim}
In this command:
\begin{itemize}
\item \texttt{owrt0} is the name of the container to which the proxy device is being added.
\item \texttt{owrt-proxy} is the arbitrary name given to the new proxy device.
\item The \texttt{listen} parameter specifies the host's IP address and port to listen on. Here, \texttt{0.0.0.0:1234} means all IPv4 addresses on the host on port 1234.
\item The \texttt{connect} parameter specifies the container's IP address and port to connect to. Here, \texttt{127.0.0.1:80} directs the traffic to port 80 on the container.
\end{itemize}
\subsection{Listing Proxy Devices for a Container}
To list all configured devices, including proxy devices for a specific container, you can use the \texttt{incus config device show} command followed by the container name. For the container \texttt{owrt0}, the command is:
\begin{verbatim}
incus config device show owrt0
\end{verbatim}
This command will display a list of all devices attached to the container \texttt{owrt0}, including details about the proxy device(s) configured.
\subsection{Summary}
Proxy devices are a powerful feature of LXD, allowing for flexible network configurations and exposing container services to the host network. By following the commands outlined above, you can easily set up port forwarding for your LXD containers.
% \section{Network Bridge Configuration with dnsmasq}
% To configure a network bridge `mybridge` in LXD using `dnsmasq` for DHCP and NAT, follow these steps:
% \subsection{dnsmasq Installation}
% \begin{verbatim}
% sudo apt-get install dnsmasq
% \end{verbatim}
% \subsection{dnsmasq Configuration}
% Edit the dnsmasq configuration file, usually located at \texttt{/etc/dnsmasq.conf}.
% Add the following configuration:
% \begin{verbatim}
% interface=mybridge
% bind-interfaces
% dhcp-range=192.168.1.10,192.168.1.100,24h
% \end{verbatim}
% This sets up a DHCP server on the `mybridge` with an address pool from 192.168.1.10 to 192.168.1.100, and a lease time of 24 hours.
% \subsection{NAT Configuration}
% Configuring NAT (Network Address Translation) is necessary for containers to communicate with the Internet. Execute the following commands to set up NAT for the `mybridge`:
% \begin{verbatim}
% sudo iptables -t nat -A POSTROUTING -o <your-external-interface> -j MASQUERADE
% sudo iptables -A FORWARD -i mybridge -o <your-external-interface> -j ACCEPT
% sudo iptables -A FORWARD -i <your-external-interface> -o mybridge -m state --state RELATED,ESTABLISHED \
% -j ACCEPT
% \end{verbatim}
% Replace \texttt{<your-external-interface>} with the name of your server's external network interface.
\section*{Configuring the OpenWRT}
After creating the container, configure the network and DHCP settings using UCI commands as follows:
\begin{verbatim}
uci add network device
uci set network.@device[-1].name="br-lan"
uci set network.@device[-1].type="bridge"
uci set network.@device[-1].bridge_empty="1"
uci set network.lan=interface
uci set network.lan.device=br-lan
uci set network.lan.proto=static
uci set network.lan.ipaddr=192.168.100.1
uci set network.lan.netmask=255.255.255.0
uci set dhcp.lan=dhcp
uci set dhcp.lan.start=100
uci set dhcp.lan.limit=150
uci set dhcp.lan.leasetime=2h
uci set dhcp.lan.interface=lan
uci commit network
uci commit firewall
uci commit dhcp
/etc/init.d/network restart
/etc/init.d/firewall restart
/etc/init.d/dnsmasq restart
uci delete network.lan
uci delete network.@device[0]
uci delete network.wan
uci delete network.wan6
uci commit network
uci show network
network.wan=interface
network.wan.ifname='eth0'
network.wan.proto='dhcp'
\end{verbatim}
% \bibliographystyle{plain}
\printbibliography
\end{document}
incus network create incusbr0
incus profile device add default eth0 nic nictype=bridged parent=incusbr0
sudo incus stop owrt0
sudo incus delete owrt0
sudo incus launch openwrt-latest owrt0
bridge -d link show master br-lan
bridge vlan show dev web1A
sudo incus launch ubuntu:22.04 lxd-u1
sudo incus network attach incusbr0 owrt0 eth0 eth0
sudo incus config device show owrt0
eth0:
name: eth0
network: incusbr0
type: nic
sudo incus network detach incusbr0 owrt0 eth0
while true; do clear; diff network network.old; sleep 1; done
incus profile create net
incus profile edit net
config:
user.user-data: |
#net-config
package_upgrade: true
packages:
- aptitude
- tmux
- vim-nox
- nano
- mc
- git
- curl
- termshark
- procps
- sudo
- iproute2
- iputils-ping
- bridge-utils
- ifupdown
- udev
- make
- build-essential
- libssl-dev
- zlib1g-dev
- libbz2-dev
- libreadline-dev
- libsqlite3-dev
- wget
- llvm
- libncurses5-dev
- libncursesw5-dev
- xz-utils
- libffi-dev
- liblzma-dev
- python3-ipython
runcmd:
- useradd -ms /bin/bash user
- echo "user:pass" | chpasswd
- adduser user sudo
description: Custom Ubuntu profile for network tools and utilities
name: net
used_by: []
sudo incus launch ubuntu:22.04 lxd-u1 --profile default --profile net