nsjail/cgroup2.cc

/*
nsjail - cgroup2 namespacing
-----------------------------------------
Copyright 2014 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "cgroup2.h"
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/magic.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <unistd.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include "logs.h"
#include "util.h"
namespace cgroup2 {
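/* Move a process into the given cgroup by writing its pid to <cgroup_path>/cgroup.procs. */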
static bool addPidToProcList(const std::string &cgroup_path, pid_t pid) {
std::string pid_str = std::to_string(pid);
LOG_D("Adding pid='%s' to cgroup.procs", pid_str.c_str());
if (!util::writeBufToFile((cgroup_path + "/cgroup.procs").c_str(), pid_str.c_str(),
pid_str.length(), O_WRONLY)) {
LOG_W("Could not update cgroup.procs");
return false;
}
return true;
}
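/*
 * Per-jail cgroup directory for the jailed pid; with the typical /sys/fs/cgroup
 * mount this is /sys/fs/cgroup/NSJAIL.<pid>.
 */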
static std::string getCgroupPath(nsjconf_t *nsjconf, pid_t pid) {
return nsjconf->cgroupv2_mount + "/NSJAIL." + std::to_string(pid);
}
static std::string getJailCgroupPath(nsjconf_t *nsjconf) {
return nsjconf->cgroupv2_mount + "/NSJAIL_SELF." + std::to_string(getpid());
}
static bool createCgroup(const std::string &cgroup_path, pid_t pid) {
LOG_D("Create '%s' for pid=%d", cgroup_path.c_str(), (int)pid);
if (mkdir(cgroup_path.c_str(), 0700) == -1 && errno != EEXIST) {
PLOG_W("mkdir('%s', 0700) failed", cgroup_path.c_str());
return false;
}
return true;
}
static bool moveSelfIntoChildCgroup(nsjconf_t *nsjconf) {
/*
* Move ourselves into another group to avoid the 'No internal processes' rule
* https://unix.stackexchange.com/a/713343
*/
std::string jail_cgroup_path = getJailCgroupPath(nsjconf);
LOG_I("nsjail is moving itself to a new child cgroup: %s\n", jail_cgroup_path.c_str());
RETURN_ON_FAILURE(createCgroup(jail_cgroup_path, getpid()));
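/* Writing "0" to cgroup.procs moves the writing process (nsjail itself) into the cgroup. */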
RETURN_ON_FAILURE(addPidToProcList(jail_cgroup_path, 0));
return true;
}
static bool enableCgroupSubtree(nsjconf_t *nsjconf, const std::string &controller, pid_t pid) {
std::string cgroup_path = nsjconf->cgroupv2_mount;
LOG_D("Enable cgroup.subtree_control +'%s' to '%s' for pid=%d", controller.c_str(),
cgroup_path.c_str(), pid);
std::string val = "+" + controller;
/*
* Try once without moving the nsjail process and if that fails then try moving the nsjail
* process into a child cgroup before trying a second time.
*/
if (util::writeBufToFile((cgroup_path + "/cgroup.subtree_control").c_str(), val.c_str(),
val.length(), O_WRONLY, false)) {
return true;
}
if (errno == EBUSY) {
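/*
 * EBUSY: this cgroup already contains processes, so controllers cannot be
 * delegated to its children ('no internal processes' rule). Move nsjail into
 * a child cgroup first, then retry.
 */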
RETURN_ON_FAILURE(moveSelfIntoChildCgroup(nsjconf));
if (util::writeBufToFile((cgroup_path + "/cgroup.subtree_control").c_str(),
val.c_str(), val.length(), O_WRONLY)) {
return true;
}
}
LOG_E(
"Could not apply '%s' to cgroup.subtree_control in '%s'. nsjail MUST be run from root "
"and the cgroup mount path must refer to the root/host cgroup to use cgroupv2. If you "
"use Docker, you may need to run the container with --cgroupns=host so that nsjail can"
" access the host/root cgroupv2 hierarchy. An alternative is mounting (or remounting) "
"the cgroupv2 filesystem but using the flag is just simpler.",
val.c_str(), cgroup_path.c_str());
return false;
}
static bool writeToCgroup(
const std::string &cgroup_path, const std::string &resource, const std::string &value) {
LOG_I("Setting '%s' to '%s'", resource.c_str(), value.c_str());
if (!util::writeBufToFile(
(cgroup_path + "/" + resource).c_str(), value.c_str(), value.length(), O_WRONLY)) {
LOG_W("Could not update %s", resource.c_str());
return false;
}
return true;
}
static void removeCgroup(const std::string &cgroup_path) {
LOG_D("Remove '%s'", cgroup_path.c_str());
if (rmdir(cgroup_path.c_str()) == -1) {
PLOG_W("rmdir('%s') failed", cgroup_path.c_str());
}
}
static bool needMemoryController(nsjconf_t *nsjconf) {
/*
* Check if we need 'memory'
* This matches the check in initNsFromParentMem()
*/
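/*
 * Example (illustrative values): cgroup_mem_max=100 MiB with
 * cgroup_mem_memsw_max=150 MiB yields swap_max=50 MiB, so the controller is
 * needed; with cgroup_mem_max=0 and a negative swap_max no memory limit was
 * requested and the controller is not needed.
 */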
ssize_t swap_max = nsjconf->cgroup_mem_swap_max;
if (nsjconf->cgroup_mem_memsw_max > (size_t)0) {
swap_max = nsjconf->cgroup_mem_memsw_max - nsjconf->cgroup_mem_max;
}
if (nsjconf->cgroup_mem_max == (size_t)0 && swap_max < (ssize_t)0) {
return false;
}
return true;
}
static bool needPidsController(nsjconf_t *nsjconf) {
return nsjconf->cgroup_pids_max != 0;
}
static bool needCpuController(nsjconf_t *nsjconf) {
return nsjconf->cgroup_cpu_ms_per_sec != 0U;
}
/*
* We will use this buf to read from cgroup.subtree_control to see if
* the root cgroup has the necessary controllers listed
*/
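/* Typical contents: "cpuset cpu io memory hugetlb pids", so 0x40 bytes is enough. */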
#define SUBTREE_CONTROL_BUF_LEN 0x40
bool setup(nsjconf_t *nsjconf) {
/*
* Read from cgroup.subtree_control in the root to see if
* the controllers we need are there.
*/
auto p = nsjconf->cgroupv2_mount + "/cgroup.subtree_control";
char buf[SUBTREE_CONTROL_BUF_LEN];
int read = util::readFromFile(p.c_str(), buf, SUBTREE_CONTROL_BUF_LEN - 1);
if (read < 0) {
LOG_W("cgroupv2 setup: Could not read root subtree_control");
return false;
}
buf[read] = 0;
/* Are the controllers we need there? */
bool subtree_ok = (!needMemoryController(nsjconf) || strstr(buf, "memory")) &&
(!needPidsController(nsjconf) || strstr(buf, "pids")) &&
(!needCpuController(nsjconf) || strstr(buf, "cpu"));
if (!subtree_ok) {
/* Now we can write to the root cgroup.subtree_control */
if (needMemoryController(nsjconf)) {
RETURN_ON_FAILURE(enableCgroupSubtree(nsjconf, "memory", getpid()));
}
if (needPidsController(nsjconf)) {
RETURN_ON_FAILURE(enableCgroupSubtree(nsjconf, "pids", getpid()));
}
if (needCpuController(nsjconf)) {
RETURN_ON_FAILURE(enableCgroupSubtree(nsjconf, "cpu", getpid()));
}
}
return true;
}
bool detectCgroupv2(nsjconf_t *nsjconf) {
/*
* Check cgroupv2_mount, if it is a cgroup2 mount, use it.
*/
struct statfs buf;
if (statfs(nsjconf->cgroupv2_mount.c_str(), &buf)) {
LOG_D("statfs %s failed with %d", nsjconf->cgroupv2_mount.c_str(), errno);
nsjconf->use_cgroupv2 = false;
return false;
}
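/* CGROUP2_SUPER_MAGIC (0x63677270, from <linux/magic.h>) identifies a cgroup2 filesystem. */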
nsjconf->use_cgroupv2 = (buf.f_type == CGROUP2_SUPER_MAGIC);
return true;
}
static bool initNsFromParentMem(nsjconf_t *nsjconf, pid_t pid) {
ssize_t swap_max = nsjconf->cgroup_mem_swap_max;
if (nsjconf->cgroup_mem_memsw_max > (size_t)0) {
swap_max = nsjconf->cgroup_mem_memsw_max - nsjconf->cgroup_mem_max;
}
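/*
 * cgroup_mem_memsw_max is a combined memory+swap limit (cgroup v1 style);
 * cgroupv2 tracks swap separately, so the remainder after subtracting
 * cgroup_mem_max becomes the memory.swap.max value written below.
 */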
if (nsjconf->cgroup_mem_max == (size_t)0 && swap_max < (ssize_t)0) {
return true;
}
std::string cgroup_path = getCgroupPath(nsjconf, pid);
RETURN_ON_FAILURE(createCgroup(cgroup_path, pid));
RETURN_ON_FAILURE(addPidToProcList(cgroup_path, pid));
if (nsjconf->cgroup_mem_max > (size_t)0) {
RETURN_ON_FAILURE(writeToCgroup(
cgroup_path, "memory.max", std::to_string(nsjconf->cgroup_mem_max)));
}
if (swap_max >= (ssize_t)0) {
RETURN_ON_FAILURE(
writeToCgroup(cgroup_path, "memory.swap.max", std::to_string(swap_max)));
}
return true;
}
static bool initNsFromParentPids(nsjconf_t *nsjconf, pid_t pid) {
if (nsjconf->cgroup_pids_max == 0U) {
return true;
}
std::string cgroup_path = getCgroupPath(nsjconf, pid);
RETURN_ON_FAILURE(createCgroup(cgroup_path, pid));
RETURN_ON_FAILURE(addPidToProcList(cgroup_path, pid));
return writeToCgroup(cgroup_path, "pids.max", std::to_string(nsjconf->cgroup_pids_max));
}
static bool initNsFromParentCpu(nsjconf_t *nsjconf, pid_t pid) {
if (nsjconf->cgroup_cpu_ms_per_sec == 0U) {
return true;
}
std::string cgroup_path = getCgroupPath(nsjconf, pid);
RETURN_ON_FAILURE(createCgroup(cgroup_path, pid));
RETURN_ON_FAILURE(addPidToProcList(cgroup_path, pid));
/*
* The maximum bandwidth limit in the format: `$MAX $PERIOD`.
* This indicates that the group may consume up to $MAX in each $PERIOD
* duration.
*/
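/*
 * Example: cgroup_cpu_ms_per_sec=100 writes "100000 1000000", i.e. 100000us
 * of CPU time per 1000000us (1 s) period.
 */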
std::string cpu_ms_per_sec_str = std::to_string(nsjconf->cgroup_cpu_ms_per_sec * 1000U);
cpu_ms_per_sec_str += " 1000000";
return writeToCgroup(cgroup_path, "cpu.max", cpu_ms_per_sec_str);
}
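/*
 * Called in the parent: creates the child's NSJAIL.<pid> cgroup and applies the
 * configured memory, pids and cpu limits.
 */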
bool initNsFromParent(nsjconf_t *nsjconf, pid_t pid) {
RETURN_ON_FAILURE(initNsFromParentMem(nsjconf, pid));
RETURN_ON_FAILURE(initNsFromParentPids(nsjconf, pid));
return initNsFromParentCpu(nsjconf, pid);
}
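/* Called in the parent once the jailed process is gone: removes the per-jail cgroup if any limit was configured. */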
void finishFromParent(nsjconf_t *nsjconf, pid_t pid) {
if (nsjconf->cgroup_mem_max != (size_t)0 || nsjconf->cgroup_pids_max != 0U ||
nsjconf->cgroup_cpu_ms_per_sec != 0U) {
removeCgroup(getCgroupPath(nsjconf, pid));
}
}
} // namespace cgroup2