diff --git a/components/fs/littlefs/littlefs/.github/workflows/post-release.yml b/components/fs/littlefs/littlefs/.github/workflows/post-release.yml
new file mode 100644
index 00000000..a44a675d
--- /dev/null
+++ b/components/fs/littlefs/littlefs/.github/workflows/post-release.yml
@@ -0,0 +1,26 @@
+name: post-release
+on:
+ release:
+ branches: [master]
+ types: [released]
+
+jobs:
+ post-release:
+ runs-on: ubuntu-20.04
+ steps:
+ # trigger post-release in dependency repo, this indirection allows the
+ # dependency repo to be updated often without affecting this repo. At
+ # the time of this comment, the dependency repo is responsible for
+ # creating PRs for other dependent repos post-release.
+ - name: trigger-post-release
+ continue-on-error: true
+ run: |
+ curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
+ -d "$(jq -n '{
+ event_type: "post-release",
+ client_payload: {
+ repo: env.GITHUB_REPOSITORY,
+ version: "${{github.event.release.tag_name}}"}}' \
+ | tee /dev/stderr)"
+
diff --git a/components/fs/littlefs/littlefs/.github/workflows/release.yml b/components/fs/littlefs/littlefs/.github/workflows/release.yml
new file mode 100644
index 00000000..c38b8de6
--- /dev/null
+++ b/components/fs/littlefs/littlefs/.github/workflows/release.yml
@@ -0,0 +1,196 @@
+name: release
+on:
+ workflow_run:
+ workflows: [test]
+ branches: [master]
+ types: [completed]
+
+jobs:
+ release:
+ runs-on: ubuntu-20.04
+
+ # need to manually check for a couple things
+ # - tests passed?
+ # - we are the most recent commit on master?
+ if: ${{github.event.workflow_run.conclusion == 'success' &&
+ github.event.workflow_run.head_sha == github.sha}}
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{github.event.workflow_run.head_sha}}
+ # need workflow access since we push branches
+ # containing workflows
+ token: ${{secrets.BOT_TOKEN}}
+ # need all tags
+ fetch-depth: 0
+
+ # try to get results from tests
+ - uses: dawidd6/action-download-artifact@v2
+ continue-on-error: true
+ with:
+ workflow: ${{github.event.workflow_run.name}}
+ run_id: ${{github.event.workflow_run.id}}
+ name: results
+ path: results
+
+ - name: find-version
+ run: |
+ # rip version from lfs.h
+ LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
+ | awk '{print $3}')"
+ LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
+ LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
+
+ # find a new patch version based on what we find in our tags
+ LFS_VERSION_PATCH="$( \
+ ( git describe --tags --abbrev=0 \
+ --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
+ || echo 'v0.0.-1' ) \
+ | awk -F '.' '{print $3+1}')"
+
+ # found new version
+ LFS_VERSION="v$LFS_VERSION_MAJOR`
+ `.$LFS_VERSION_MINOR`
+ `.$LFS_VERSION_PATCH"
+ echo "LFS_VERSION=$LFS_VERSION"
+ echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
+ echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
+ echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
+ echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
+
+ # try to find previous version?
+ - name: find-prev-version
+ continue-on-error: true
+ run: |
+ LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
+ echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
+ echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
+
+ # try to find results from tests
+ - name: collect-results
+ run: |
+ # previous results to compare against?
+ [ -n "$LFS_PREV_VERSION" ] && curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
+ `status/$LFS_PREV_VERSION?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
+ >> prev-results.json \
+ || true
+
+ # build table for GitHub
+ echo "<table>" >> results.txt
+ echo "<thead>" >> results.txt
+ echo "<tr>" >> results.txt
+ echo "<th align=left>Configuration</th>" >> results.txt
+ for r in Code Stack Structs Coverage
+ do
+   echo "<th align=right>$r</th>" >> results.txt
+ done
+ echo "</tr>" >> results.txt
+ echo "</thead>" >> results.txt
+
+ echo "<tbody>" >> results.txt
+ for c in "" readonly threadsafe migrate error-asserts
+ do
+   echo "<tr>" >> results.txt
+   c_or_default=${c:-default}
+   echo "<td align=left>${c_or_default^}</td>" >> results.txt
+   for r in code stack structs
+   do
+     # per-config results
+     echo "<td align=right>" >> results.txt
+     [ -e results/thumb${c:+-$c}.csv ] && ( \
+       export PREV="$(jq -re '
+         select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
+         | capture("(?<result>[0-9∞]+)").result' \
+         prev-results.json || echo 0)"
+       ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
+         NR==2 {printf "%s B",$2}
+         NR==2 && ENVIRON["PREV"]+0 != 0 {
+           printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+         NR==2 {printf "\n"}' \
+       | sed -e 's/ /\&nbsp;/g' \
+       >> results.txt)
+     echo "</td>" >> results.txt
+   done
+   # coverage results
+   if [ -z $c ]
+   then
+     echo "<td rowspan=0 align=right>" >> results.txt
+     [ -e results/coverage.csv ] && ( \
+       export PREV="$(jq -re '
+         select(.context == "results / coverage").description
+         | capture("(?<result>[0-9\\.]+)").result' \
+         prev-results.json || echo 0)"
+       ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
+         NR==2 {printf "%.1f%% of %d lines",$4,$3}
+         NR==2 && ENVIRON["PREV"]+0 != 0 {
+           printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
+         NR==2 {printf "\n"}' \
+       | sed -e 's/ /\&nbsp;/g' \
+       >> results.txt)
+     echo "</td>" >> results.txt
+   fi
+   echo "</tr>" >> results.txt
+ done
+ echo "</tbody>" >> results.txt
+ echo "</table>" >> results.txt
+
+ cat results.txt
+
+ # find changes from history
+ - name: collect-changes
+ run: |
+ [ -n "$LFS_PREV_VERSION" ] || exit 0
+ # use explicit link to github commit so that release notes can
+ # be copied elsewhere
+ git log "$LFS_PREV_VERSION.." \
+ --grep='^Merge' --invert-grep \
+ --format="format:[\`%h\`](`
+ `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
+ > changes.txt
+ echo "CHANGES:"
+ cat changes.txt
+
+ # create and update major branches (vN and vN-prefix)
+ - name: create-major-branches
+ run: |
+ # create major branch
+ git branch "v$LFS_VERSION_MAJOR" HEAD
+
+ # create major prefix branch
+ git config user.name ${{secrets.BOT_USER}}
+ git config user.email ${{secrets.BOT_EMAIL}}
+ git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
+ "v$LFS_VERSION_MAJOR-prefix" || true
+ ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
+ git branch "v$LFS_VERSION_MAJOR-prefix" $( \
+ git commit-tree $(git write-tree) \
+ $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
+ -p HEAD \
+ -m "Generated v$LFS_VERSION_MAJOR prefixes")
+ git reset --hard
+
+ # push!
+ git push --atomic origin \
+ "v$LFS_VERSION_MAJOR" \
+ "v$LFS_VERSION_MAJOR-prefix"
+
+ # build release notes
+ - name: create-release
+ run: |
+ # create release and patch version tag (vN.N.N)
+ # only draft if not a patch release
+ [ -e results.txt ] && export RESULTS="$(cat results.txt)"
+ [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
+ curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
+ -d "$(jq -n '{
+ tag_name: env.LFS_VERSION,
+ name: env.LFS_VERSION | rtrimstr(".0"),
+ target_commitish: "${{github.event.workflow_run.head_sha}}",
+ draft: env.LFS_VERSION | endswith(".0"),
+ body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
+ | tee /dev/stderr)"
+
diff --git a/components/fs/littlefs/littlefs/.github/workflows/status.yml b/components/fs/littlefs/littlefs/.github/workflows/status.yml
new file mode 100644
index 00000000..d28b17cc
--- /dev/null
+++ b/components/fs/littlefs/littlefs/.github/workflows/status.yml
@@ -0,0 +1,55 @@
+name: status
+on:
+ workflow_run:
+ workflows: [test]
+ types: [completed]
+
+jobs:
+ status:
+ runs-on: ubuntu-20.04
+ steps:
+ # custom statuses?
+ - uses: dawidd6/action-download-artifact@v2
+ continue-on-error: true
+ with:
+ workflow: ${{github.event.workflow_run.name}}
+ run_id: ${{github.event.workflow_run.id}}
+ name: status
+ path: status
+ - name: update-status
+ continue-on-error: true
+ run: |
+ ls status
+ for s in $(shopt -s nullglob ; echo status/*.json)
+ do
+ # parse requested status
+ export STATE="$(jq -er '.state' $s)"
+ export CONTEXT="$(jq -er '.context' $s)"
+ export DESCRIPTION="$(jq -er '.description' $s)"
+ # help lookup URL for job/steps because GitHub makes
+ # it VERY HARD to link to specific jobs
+ export TARGET_URL="$(
+ jq -er '.target_url // empty' $s || (
+ export TARGET_JOB="$(jq -er '.target_job' $s)"
+ export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
+ curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
+ `${{github.event.workflow_run.id}}/jobs" \
+ | jq -er '.jobs[]
+ | select(.name == env.TARGET_JOB)
+ | .html_url
+ + "?check_suite_focus=true"
+ + ((.steps[]
+ | select(.name == env.TARGET_STEP)
+ | "#step:\(.number):0") // "")'))"
+ # update status
+ curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
+ `${{github.event.workflow_run.head_sha}}" \
+ -d "$(jq -n '{
+ state: env.STATE,
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_url: env.TARGET_URL}' \
+ | tee /dev/stderr)"
+ done
diff --git a/components/fs/littlefs/littlefs/.github/workflows/test.yml b/components/fs/littlefs/littlefs/.github/workflows/test.yml
new file mode 100644
index 00000000..81f00c1e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/.github/workflows/test.yml
@@ -0,0 +1,493 @@
+name: test
+on: [push, pull_request]
+
+env:
+ CFLAGS: -Werror
+ MAKEFLAGS: -j
+
+jobs:
+ # run tests
+ test:
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ arch: [x86_64, thumb, mips, powerpc]
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need a few additional tools
+ #
+ # note this includes gcc-10, which is required for -fcallgraph-info=su
+ sudo apt-get update -qq
+ sudo apt-get install -qq gcc-10 python3 python3-pip lcov
+ sudo pip3 install toml
+ echo "CC=gcc-10" >> $GITHUB_ENV
+ gcc-10 --version
+ lcov --version
+ python3 --version
+
+ # need newer lcov version for gcc-10
+ #sudo apt-get remove lcov
+ #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
+ #sudo apt install ./lcov_1.15-1_all.deb
+ #lcov --version
+ #which lcov
+ #ls -lha /usr/bin/lcov
+ wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
+ tar xf lcov-1.15.tar.gz
+ sudo make -C lcov-1.15 install
+
+ # setup a ram-backed disk to speed up reentrant tests
+ mkdir disks
+ sudo mount -t tmpfs -o size=100m tmpfs disks
+ TESTFLAGS="$TESTFLAGS --disk=disks/disk"
+
+ # collect coverage
+ mkdir -p coverage
+ TESTFLAGS="$TESTFLAGS --coverage=`
+ `coverage/${{github.job}}-${{matrix.arch}}.info"
+
+ echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
+
+ # cross-compile with ARM Thumb (32-bit, little-endian)
+ - name: install-thumb
+ if: ${{matrix.arch == 'thumb'}}
+ run: |
+ sudo apt-get install -qq \
+ gcc-10-arm-linux-gnueabi \
+ libc6-dev-armel-cross \
+ qemu-user
+ echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
+ echo "EXEC=qemu-arm" >> $GITHUB_ENV
+ arm-linux-gnueabi-gcc-10 --version
+ qemu-arm -version
+ # cross-compile with MIPS (32-bit, big-endian)
+ - name: install-mips
+ if: ${{matrix.arch == 'mips'}}
+ run: |
+ sudo apt-get install -qq \
+ gcc-10-mips-linux-gnu \
+ libc6-dev-mips-cross \
+ qemu-user
+ echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+ echo "EXEC=qemu-mips" >> $GITHUB_ENV
+ mips-linux-gnu-gcc-10 --version
+ qemu-mips -version
+ # cross-compile with PowerPC (32-bit, big-endian)
+ - name: install-powerpc
+ if: ${{matrix.arch == 'powerpc'}}
+ run: |
+ sudo apt-get install -qq \
+ gcc-10-powerpc-linux-gnu \
+ libc6-dev-powerpc-cross \
+ qemu-user
+ echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+ echo "EXEC=qemu-ppc" >> $GITHUB_ENV
+ powerpc-linux-gnu-gcc-10 --version
+ qemu-ppc -version
+
+ # make sure example can at least compile
+ - name: test-example
+ run: |
+ sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
+ make all CFLAGS+=" \
+ -Duser_provided_block_device_read=NULL \
+ -Duser_provided_block_device_prog=NULL \
+ -Duser_provided_block_device_erase=NULL \
+ -Duser_provided_block_device_sync=NULL \
+ -include stdio.h"
+ rm test.c
+
+ # test configurations
+ # normal+reentrant tests
+ - name: test-default
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk"
+ # NOR flash: read/prog = 1 block = 4KiB
+ - name: test-nor
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
+ # SD/eMMC: read/prog = 512 block = 512
+ - name: test-emmc
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
+ # NAND flash: read/prog = 4KiB block = 32KiB
+ - name: test-nand
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
+ # other extreme geometries that are useful for various corner cases
+ - name: test-no-intrinsics
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_NO_INTRINSICS"
+ - name: test-byte-writes
+ # it just takes too long to test byte-level writes when in qemu,
+ # should be plenty covered by the other configurations
+ if: ${{matrix.arch == 'x86_64'}}
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
+ - name: test-block-cycles
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_BLOCK_CYCLES=1"
+ - name: test-odd-block-count
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
+ - name: test-odd-block-size
+ run: |
+ make clean
+ make test TESTFLAGS+="-nrk \
+ -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
+
+ # upload coverage for later coverage
+ - name: upload-coverage
+ uses: actions/upload-artifact@v2
+ with:
+ name: coverage
+ path: coverage
+ retention-days: 1
+
+ # update results
+ - name: results
+ run: |
+ mkdir -p results
+ make clean
+ make lfs.csv \
+ CFLAGS+=" \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR"
+ cp lfs.csv results/${{matrix.arch}}.csv
+ ./scripts/summary.py results/${{matrix.arch}}.csv
+ - name: results-readonly
+ run: |
+ mkdir -p results
+ make clean
+ make lfs.csv \
+ CFLAGS+=" \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_READONLY"
+ cp lfs.csv results/${{matrix.arch}}-readonly.csv
+ ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
+ - name: results-threadsafe
+ run: |
+ mkdir -p results
+ make clean
+ make lfs.csv \
+ CFLAGS+=" \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_THREADSAFE"
+ cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
+ ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
+ - name: results-migrate
+ run: |
+ mkdir -p results
+ make clean
+ make lfs.csv \
+ CFLAGS+=" \
+ -DLFS_NO_ASSERT \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -DLFS_MIGRATE"
+ cp lfs.csv results/${{matrix.arch}}-migrate.csv
+ ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
+ - name: results-error-asserts
+ run: |
+ mkdir -p results
+ make clean
+ make lfs.csv \
+ CFLAGS+=" \
+ -DLFS_NO_DEBUG \
+ -DLFS_NO_WARN \
+ -DLFS_NO_ERROR \
+ -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
+ cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
+ ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
+ - name: upload-results
+ uses: actions/upload-artifact@v2
+ with:
+ name: results
+ path: results
+
+ # create statuses with results
+ - name: collect-status
+ run: |
+ mkdir -p status
+ for f in $(shopt -s nullglob ; echo results/*.csv)
+ do
+ export STEP="results$(
+ echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
+ for r in code stack structs
+ do
+ export CONTEXT="results (${{matrix.arch}}$(
+ echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
+ export PREV="$(curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+ | select(.context == env.CONTEXT).description
+ | capture("(?<result>[0-9∞]+)").result' \
+ || echo 0)"
+ export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
+ NR==2 {printf "%s B",$2}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
+ jq -n '{
+ state: "success",
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_job: "${{github.job}} (${{matrix.arch}})",
+ target_step: env.STEP}' \
+ | tee status/$r-${{matrix.arch}}$(
+ echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
+ done
+ done
+ - name: upload-status
+ uses: actions/upload-artifact@v2
+ with:
+ name: status
+ path: status
+ retention-days: 1
+
+ # run under Valgrind to check for memory errors
+ valgrind:
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need toml, also pip3 isn't installed by default?
+ sudo apt-get update -qq
+ sudo apt-get install -qq python3 python3-pip
+ sudo pip3 install toml
+ - name: install-valgrind
+ run: |
+ sudo apt-get update -qq
+ sudo apt-get install -qq valgrind
+ valgrind --version
+ # normal tests, we don't need to test all geometries
+ - name: test-valgrind
+ run: make test TESTFLAGS+="-k --valgrind"
+
+ # test that compilation is warning free under clang
+ clang:
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need toml, also pip3 isn't installed by default?
+ sudo apt-get update -qq
+ sudo apt-get install -qq python3 python3-pip
+ sudo pip3 install toml
+ - name: install-clang
+ run: |
+ sudo apt-get update -qq
+ sudo apt-get install -qq clang
+ echo "CC=clang" >> $GITHUB_ENV
+ clang --version
+ # no reason to not test again
+ - name: test-clang
+ run: make test TESTFLAGS+="-k"
+
+ # self-host with littlefs-fuse for a fuzz-like test
+ fuse:
+ runs-on: ubuntu-20.04
+ if: ${{!endsWith(github.ref, '-prefix')}}
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need toml, also pip3 isn't installed by default?
+ sudo apt-get update -qq
+ sudo apt-get install -qq python3 python3-pip libfuse-dev
+ sudo pip3 install toml
+ fusermount -V
+ gcc --version
+ - uses: actions/checkout@v2
+ with:
+ repository: littlefs-project/littlefs-fuse
+ ref: v2
+ path: littlefs-fuse
+ - name: setup
+ run: |
+ # copy our new version into littlefs-fuse
+ rm -rf littlefs-fuse/littlefs/*
+ cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
+
+ # setup disk for littlefs-fuse
+ mkdir mount
+ LOOP=$(sudo losetup -f)
+ sudo chmod a+rw $LOOP
+ dd if=/dev/zero bs=512 count=128K of=disk
+ losetup $LOOP disk
+ echo "LOOP=$LOOP" >> $GITHUB_ENV
+ - name: test
+ run: |
+ # self-host test
+ make -C littlefs-fuse
+
+ littlefs-fuse/lfs --format $LOOP
+ littlefs-fuse/lfs $LOOP mount
+
+ ls mount
+ mkdir mount/littlefs
+ cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+ cd mount/littlefs
+ stat .
+ ls -flh
+ make -B test
+
+ # test migration using littlefs-fuse
+ migrate:
+ runs-on: ubuntu-20.04
+ if: ${{!endsWith(github.ref, '-prefix')}}
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ # need toml, also pip3 isn't installed by default?
+ sudo apt-get update -qq
+ sudo apt-get install -qq python3 python3-pip libfuse-dev
+ sudo pip3 install toml
+ fusermount -V
+ gcc --version
+ - uses: actions/checkout@v2
+ with:
+ repository: littlefs-project/littlefs-fuse
+ ref: v2
+ path: v2
+ - uses: actions/checkout@v2
+ with:
+ repository: littlefs-project/littlefs-fuse
+ ref: v1
+ path: v1
+ - name: setup
+ run: |
+ # copy our new version into littlefs-fuse
+ rm -rf v2/littlefs/*
+ cp -r $(git ls-tree --name-only HEAD) v2/littlefs
+
+ # setup disk for littlefs-fuse
+ mkdir mount
+ LOOP=$(sudo losetup -f)
+ sudo chmod a+rw $LOOP
+ dd if=/dev/zero bs=512 count=128K of=disk
+ losetup $LOOP disk
+ echo "LOOP=$LOOP" >> $GITHUB_ENV
+ - name: test
+ run: |
+ # compile v1 and v2
+ make -C v1
+ make -C v2
+
+ # run self-host test with v1
+ v1/lfs --format $LOOP
+ v1/lfs $LOOP mount
+
+ ls mount
+ mkdir mount/littlefs
+ cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+ cd mount/littlefs
+ stat .
+ ls -flh
+ make -B test
+
+ # attempt to migrate
+ cd ../..
+ fusermount -u mount
+
+ v2/lfs --migrate $LOOP
+ v2/lfs $LOOP mount
+
+ # run self-host test with v2 right where we left off
+ ls mount
+ cd mount/littlefs
+ stat .
+ ls -flh
+ make -B test
+
+ # collect coverage info
+ coverage:
+ runs-on: ubuntu-20.04
+ needs: [test]
+ steps:
+ - uses: actions/checkout@v2
+ - name: install
+ run: |
+ sudo apt-get update -qq
+ sudo apt-get install -qq python3 python3-pip lcov
+ sudo pip3 install toml
+ # yes we continue-on-error nearly every step, continue-on-error
+ # at job level apparently still marks a job as failed, which isn't
+ # what we want
+ - uses: actions/download-artifact@v2
+ continue-on-error: true
+ with:
+ name: coverage
+ path: coverage
+ - name: results-coverage
+ continue-on-error: true
+ run: |
+ mkdir -p results
+ lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
+ -o results/coverage.info
+ ./scripts/coverage.py results/coverage.info -o results/coverage.csv
+ - name: upload-results
+ uses: actions/upload-artifact@v2
+ with:
+ name: results
+ path: results
+ - name: collect-status
+ run: |
+ mkdir -p status
+ [ -e results/coverage.csv ] || exit 0
+ export STEP="results-coverage"
+ export CONTEXT="results / coverage"
+ export PREV="$(curl -sS \
+ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
+ | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+ | select(.context == env.CONTEXT).description
+ | capture("(?<result>[0-9\\.]+)").result' \
+ || echo 0)"
+ export DESCRIPTION="$(
+ ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
+ NR==2 {printf "%.1f%% of %d lines",$4,$3}
+ NR==2 && ENVIRON["PREV"]+0 != 0 {
+ printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
+ jq -n '{
+ state: "success",
+ context: env.CONTEXT,
+ description: env.DESCRIPTION,
+ target_job: "${{github.job}}",
+ target_step: env.STEP}' \
+ | tee status/coverage.json
+ - name: upload-status
+ uses: actions/upload-artifact@v2
+ with:
+ name: status
+ path: status
+ retention-days: 1
diff --git a/components/fs/littlefs/littlefs/.gitignore b/components/fs/littlefs/littlefs/.gitignore
new file mode 100644
index 00000000..3f7b860e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/.gitignore
@@ -0,0 +1,14 @@
+# Compilation output
+*.o
+*.d
+*.a
+*.ci
+*.csv
+
+# Testing things
+blocks/
+lfs
+test.c
+tests/*.toml.*
+scripts/__pycache__
+.gdb_history
diff --git a/components/fs/littlefs/littlefs/DESIGN.md b/components/fs/littlefs/littlefs/DESIGN.md
new file mode 100644
index 00000000..1d02ba3b
--- /dev/null
+++ b/components/fs/littlefs/littlefs/DESIGN.md
@@ -0,0 +1,2173 @@
+## The design of littlefs
+
+A little fail-safe filesystem designed for microcontrollers.
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+littlefs was originally built as an experiment to learn about filesystem design
+in the context of microcontrollers. The question was: How would you build a
+filesystem that is resilient to power-loss and flash wear without using
+unbounded memory?
+
+This document covers the high-level design of littlefs, how it is different
+than other filesystems, and the design decisions that got us here. For the
+low-level details covering every bit on disk, check out [SPEC.md](SPEC.md).
+
+## The problem
+
+The embedded systems littlefs targets are usually 32-bit microcontrollers with
+around 32 KiB of RAM and 512 KiB of ROM. These are often paired with SPI NOR
+flash chips with about 4 MiB of flash storage. These devices are too small for
+Linux and most existing filesystems, requiring code written specifically with
+size in mind.
+
+Flash itself is an interesting piece of technology with its own quirks and
+nuance. Unlike other forms of storage, writing to flash requires two
+operations: erasing and programming. Programming (setting bits to 0) is
+relatively cheap and can be very granular. Erasing however (setting bits to 1),
+requires an expensive and destructive operation which gives flash its name.
+[Wikipedia][wikipedia-flash] has more information on how exactly flash works.
+
+To make the situation more annoying, it's very common for these embedded
+systems to lose power at any time. Usually, microcontroller code is simple and
+reactive, with no concept of a shutdown routine. This presents a big challenge
+for persistent storage, where an unlucky power loss can corrupt the storage and
+leave a device unrecoverable.
+
+This leaves us with three major requirements for an embedded filesystem.
+
+1. **Power-loss resilience** - On these systems, power can be lost at any time.
+ If a power loss corrupts any persistent data structures, this can cause the
+ device to become unrecoverable. An embedded filesystem must be designed to
+ recover from a power loss during any write operation.
+
+1. **Wear leveling** - Writing to flash is destructive. If a filesystem
+ repeatedly writes to the same block, eventually that block will wear out.
+ Filesystems that don't take wear into account can easily burn through blocks
+ used to store frequently updated metadata and cause a device's early death.
+
+1. **Bounded RAM/ROM** - If the above requirements weren't enough, these
+ systems also have very limited amounts of memory. This prevents many
+ existing filesystem designs, which can lean on relatively large amounts of
+ RAM to temporarily store filesystem metadata.
+
+ For ROM, this means we need to keep our design simple and reuse code paths
+ were possible. For RAM we have a stronger requirement, all RAM usage is
+ bounded. This means RAM usage does not grow as the filesystem changes in
+ size or number of files. This creates a unique challenge as even presumably
+ simple operations, such as traversing the filesystem, become surprisingly
+ difficult.
+
+## Existing designs?
+
+So, what's already out there? There are, of course, many different filesystems,
+however they often share and borrow features from each other. If we look at
+power-loss resilience and wear leveling, we can narrow these down to a handful
+of designs.
+
+1. First we have the non-resilient, block based filesystems, such as [FAT] and
+ [ext2]. These are the earliest filesystem designs and often the most simple.
+ Here storage is divided into blocks, with each file being stored in a
+ collection of blocks. Without modifications, these filesystems are not
+ power-loss resilient, so updating a file is a simple as rewriting the blocks
+ in place.
+
+ ```
+ .--------.
+ | root |
+ | |
+ | |
+ '--------'
+ .-' '-.
+ v v
+ .--------. .--------.
+ | A | | B |
+ | | | |
+ | | | |
+ '--------' '--------'
+ .-' .-' '-.
+ v v v
+ .--------. .--------. .--------.
+ | C | | D | | E |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+ ```
+
+ Because of their simplicity, these filesystems are usually both the fastest
+ and smallest. However the lack of power resilience is not great, and the
+ binding relationship of storage location and data removes the filesystem's
+ ability to manage wear.
+
+2. In a completely different direction, we have logging filesystems, such as
+ [JFFS], [YAFFS], and [SPIFFS], storage location is not bound to a piece of
+ data, instead the entire storage is used for a circular log which is
+ appended with every change made to the filesystem. Writing appends new
+ changes, while reading requires traversing the log to reconstruct a file.
+ Some logging filesystems cache files to avoid the read cost, but this comes
+ at a tradeoff of RAM.
+
+ ```
+ v
+ .--------.--------.--------.--------.--------.--------.--------.--------.
+ | C | new B | new A | | A | B |
+ | | | |-> | | |
+ | | | | | | |
+ '--------'--------'--------'--------'--------'--------'--------'--------'
+ ```
+
+ Logging filesystem are beautifully elegant. With a checksum, we can easily
+ detect power-loss and fall back to the previous state by ignoring failed
+ appends. And if that wasn't good enough, their cyclic nature means that
+ logging filesystems distribute wear across storage perfectly.
+
+ The main downside is performance. If we look at garbage collection, the
+ process of cleaning up outdated data from the end of the log, I've yet to
+ see a pure logging filesystem that does not have one of these two costs:
+
+ 1. _O(n²)_ runtime
+ 2. _O(n)_ RAM
+
+ SPIFFS is a very interesting case here, as it uses the fact that repeated
+ programs to NOR flash is both atomic and masking. This is a very neat
+ solution, however it limits the type of storage you can support.
+
+3. Perhaps the most common type of filesystem, a journaling filesystem is the
+ offspring that happens when you mate a block based filesystem with a logging
+ filesystem. [ext4] and [NTFS] are good examples. Here, we take a normal
+ block based filesystem and add a bounded log where we note every change
+ before it occurs.
+
+ ```
+ journal
+ .--------.--------.
+ .--------. | C'| D'| | E'|
+ | root |-->| | |-> | |
+ | | | | | | |
+ | | '--------'--------'
+ '--------'
+ .-' '-.
+ v v
+ .--------. .--------.
+ | A | | B |
+ | | | |
+ | | | |
+ '--------' '--------'
+ .-' .-' '-.
+ v v v
+ .--------. .--------. .--------.
+ | C | | D | | E |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+ ```
+
+
+ This sort of filesystem takes the best from both worlds. Performance can be
+ as fast as a block based filesystem (though updating the journal does have
+ a small cost), and atomic updates to the journal allow the filesystem to
+ recover in the event of a power loss.
+
+ Unfortunately, journaling filesystems have a couple of problems. They are
+ fairly complex, since there are effectively two filesystems running in
+ parallel, which comes with a code size cost. They also offer no protection
+ against wear because of the strong relationship between storage location
+ and data.
+
+4. Last but not least we have copy-on-write (COW) filesystems, such as
+ [btrfs] and [ZFS]. These are very similar to other block based filesystems,
+ but instead of updating block inplace, all updates are performed by creating
+ a copy with the changes and replacing any references to the old block with
+ our new block. This recursively pushes all of our problems upwards until we
+ reach the root of our filesystem, which is often stored in a very small log.
+
+ ```
+ .--------. .--------.
+ | root | write |new root|
+ | | ==> | |
+ | | | |
+ '--------' '--------'
+ .-' '-. | '-.
+ | .-------|------------------' v
+ v v v .--------.
+ .--------. .--------. | new B |
+ | A | | B | | |
+ | | | | | |
+ | | | | '--------'
+ '--------' '--------' .-' |
+ .-' .-' '-. .------------|------'
+ | | | | v
+ v v v v .--------.
+ .--------. .--------. .--------. | new D |
+ | C | | D | | E | | |
+ | | | | | | | |
+ | | | | | | '--------'
+ '--------' '--------' '--------'
+ ```
+
+ COW filesystems are interesting. They offer very similar performance to
+ block based filesystems while managing to pull off atomic updates without
+ storing data changes directly in a log. They even disassociate the storage
+ location of data, which creates an opportunity for wear leveling.
+
+ Well, almost. The unbounded upwards movement of updates causes some
+ problems. Because updates to a COW filesystem don't stop until they've
+ reached the root, an update can cascade into a larger set of writes than
+ would be needed for the original data. On top of this, the upward motion
+ focuses these writes into the block, which can wear out much earlier than
+ the rest of the filesystem.
+
+## littlefs
+
+So what does littlefs do?
+
+If we look at existing filesystems, there are two interesting design patterns
+that stand out, but each have their own set of problems. Logging, which
+provides independent atomicity, has poor runtime performance. And COW data
+structures, which perform well, push the atomicity problem upwards.
+
+Can we work around these limitations?
+
+Consider logging. It has either a _O(n²)_ runtime or _O(n)_ RAM cost. We
+can't avoid these costs, _but_ if we put an upper bound on the size we can at
+least prevent the theoretical cost from becoming a problem. This relies on the
+super secret computer science hack where you can pretend any algorithmic
+complexity is _O(1)_ by bounding the input.
+
+In the case of COW data structures, we can try twisting the definition a bit.
+Let's say that our COW structure doesn't copy after a single write, but instead
+copies after _n_ writes. This doesn't change most COW properties (assuming you
+can write atomically!), but what it does do is prevent the upward motion of
+wear. This sort of copy-on-bounded-writes (CObW) still focuses wear, but at
+each level we divide the propagation of wear by _n_. With a sufficiently
+large _n_ (> branching factor) wear propagation is no longer a problem.
+
+See where this is going? Separately, logging and COW are imperfect solutions and
+have weaknesses that limit their usefulness. But if we merge the two they can
+mutually solve each other's limitations.
+
+This is the idea behind littlefs. At the sub-block level, littlefs is built
+out of small, two block logs that provide atomic updates to metadata anywhere
+on the filesystem. At the super-block level, littlefs is a CObW tree of blocks
+that can be evicted on demand.
+
+```
+ root
+ .--------.--------.
+ | A'| B'| |
+ | | |-> |
+ | | | |
+ '--------'--------'
+ .----' '--------------.
+ A v B v
+ .--------.--------. .--------.--------.
+ | C'| D'| | | E'|new| |
+ | | |-> | | | E'|-> |
+ | | | | | | | |
+ '--------'--------' '--------'--------'
+ .-' '--. | '------------------.
+ v v .-' v
+.--------. .--------. v .--------.
+| C | | D | .--------. write | new E |
+| | | | | E | ==> | |
+| | | | | | | |
+'--------' '--------' | | '--------'
+ '--------' .-' |
+ .-' '-. .-------------|------'
+ v v v v
+ .--------. .--------. .--------.
+ | F | | G | | new F |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+```
+
+There are still some minor issues. Small logs can be expensive in terms of
+storage, in the worst case a small log costs 4x the size of the original data.
+CObW structures require an efficient block allocator since allocation occurs
+every _n_ writes. And there is still the challenge of keeping the RAM usage
+constant.
+
+## Metadata pairs
+
+Metadata pairs are the backbone of littlefs. These are small, two block logs
+that allow atomic updates anywhere in the filesystem.
+
+Why two blocks? Well, logs work by appending entries to a circular buffer
+stored on disk. But remember that flash has limited write granularity. We can
+incrementally program new data onto erased blocks, but we need to erase a full
+block at a time. This means that in order for our circular buffer to work, we
+need more than one block.
+
+We could make our logs larger than two blocks, but the next challenge is how
+do we store references to these logs? Because the blocks themselves are erased
+during writes, using a data structure to track these blocks is complicated.
+The simple solution here is to store two block addresses for every metadata
+pair. This has the added advantage that we can change out blocks in the
+metadata pair independently, and we don't reduce our block granularity for
+other operations.
+
+In order to determine which metadata block is the most recent, we store a
+revision count that we compare using [sequence arithmetic][wikipedia-sna]
+(very handy for avoiding problems with integer overflow). Conveniently, this
+revision count also gives us a rough idea of how many erases have occurred on
+the block.
+
+```
+metadata pair pointer: {block 0, block 1}
+ | '--------------------.
+ '-. |
+disk v v
+.--------.--------.--------.--------.--------.--------.--------.--------.
+| | |metadata| |metadata| |
+| | |block 0 | |block 1 | |
+| | | | | | |
+'--------'--------'--------'--------'--------'--------'--------'--------'
+ '--. .----'
+ v v
+ metadata pair .----------------.----------------.
+ | revision 11 | revision 12 |
+ block 1 is |----------------|----------------|
+ most recent | A | A'' |
+ |----------------|----------------|
+ | checksum | checksum |
+ |----------------|----------------|
+ | B | A''' | <- most recent A
+ |----------------|----------------|
+ | A'' | checksum |
+ |----------------|----------------|
+ | checksum | | |
+ |----------------| v |
+ '----------------'----------------'
+```
+
+So how do we atomically update our metadata pairs? Atomicity (a type of
+power-loss resilience) requires two parts: redundancy and error detection.
+Error detection can be provided with a checksum, and in littlefs's case we
+use a 32-bit [CRC][wikipedia-crc]. Maintaining redundancy, on the other hand,
+requires multiple stages.
+
+1. If our block is not full and the program size is small enough to let us
+ append more entries, we can simply append the entries to the log. Because
+ we don't overwrite the original entries (remember rewriting flash requires
+ an erase), we still have the original entries if we lose power during the
+ append.
+
+ ```
+ commit A
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 0 |
+ |----------------|----------------| |----------------|----------------|
+ | | | | | A | |
+ | v | | |----------------| |
+ | | | | checksum | |
+ | | | |----------------| |
+ | | | | | | |
+ | | | | v | |
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ | | | | | |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+ Note that littlefs doesn't maintain a checksum for each entry. Many logging
+ filesystems do this, but it limits what you can update in a single atomic
+ operation. What we can do instead is group multiple entries into a commit
+ that shares a single checksum. This lets us update multiple unrelated pieces
+ of metadata as long as they reside on the same metadata pair.
+
+ ```
+ commit B and A'
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 0 |
+ |----------------|----------------| |----------------|----------------|
+ | A | | | A | |
+ |----------------| | |----------------| |
+ | checksum | | | checksum | |
+ |----------------| | |----------------| |
+ | | | | | B | |
+ | v | | |----------------| |
+ | | | | A' | |
+ | | | |----------------| |
+ | | | | checksum | |
+ | | | |----------------| |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+2. If our block _is_ full of entries, we need to somehow remove outdated
+ entries to make space for new ones. This process is called garbage
+ collection, but because littlefs has multiple garbage collectors, we
+ also call this specific case compaction.
+
+ Compared to other filesystems, littlefs's garbage collector is relatively
+ simple. We want to avoid RAM consumption, so we use a sort of brute force
+ solution where for each entry we check to see if a newer entry has been
+ written. If the entry is the most recent we append it to our new block. This
+ is where having two blocks becomes important, if we lose power we still have
+ everything in our original block.
+
+ During this compaction step we also erase the metadata block and increment
+ the revision count. Because we can commit multiple entries at once, we can
+ write all of these changes to the second block without worrying about power
+ loss. It's only when the commit's checksum is written that the compacted
+ entries and revision count become committed and readable.
+
+ ```
+ commit B', need to compact
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 0 | => | revision 1 | revision 2 |
+ |----------------|----------------| |----------------|----------------|
+ | A | | | A | A' |
+ |----------------| | |----------------|----------------|
+ | checksum | | | checksum | B' |
+ |----------------| | |----------------|----------------|
+ | B | | | B | checksum |
+ |----------------| | |----------------|----------------|
+ | A' | | | A' | | |
+ |----------------| | |----------------| v |
+ | checksum | | | checksum | |
+ |----------------| | |----------------| |
+ '----------------'----------------' '----------------'----------------'
+ ```
+
+3. If our block is full of entries _and_ we can't find any garbage, then what?
+ At this point, most logging filesystems would return an error indicating no
+ more space is available, but because we have small logs, overflowing a log
+ isn't really an error condition.
+
+ Instead, we split our original metadata pair into two metadata pairs, each
+ containing half of the entries, connected by a tail pointer. Instead of
+ increasing the size of the log and dealing with the scalability issues
+ associated with larger logs, we form a linked list of small bounded logs.
+ This is a tradeoff as this approach does use more storage space, but at the
+ benefit of improved scalability.
+
+ Despite writing to two metadata pairs, we can still maintain power
+ resilience during this split step by first preparing the new metadata pair,
+ and then inserting the tail pointer during the commit to the original
+ metadata pair.
+
+ ```
+ commit C and D, need to split
+ .----------------.----------------. .----------------.----------------.
+ | revision 1 | revision 2 | => | revision 3 | revision 2 |
+ |----------------|----------------| |----------------|----------------|
+ | A | A' | | A' | A' |
+ |----------------|----------------| |----------------|----------------|
+ | checksum | B' | | B' | B' |
+ |----------------|----------------| |----------------|----------------|
+ | B | checksum | | tail ---------------------.
+ |----------------|----------------| |----------------|----------------| |
+ | A' | | | | checksum | | |
+ |----------------| v | |----------------| | |
+ | checksum | | | | | | |
+ |----------------| | | v | | |
+ '----------------'----------------' '----------------'----------------' |
+ .----------------.---------'
+ v v
+ .----------------.----------------.
+ | revision 1 | revision 0 |
+ |----------------|----------------|
+ | C | |
+ |----------------| |
+ | D | |
+ |----------------| |
+ | checksum | |
+ |----------------| |
+ | | | |
+ | v | |
+ | | |
+ | | |
+ '----------------'----------------'
+ ```
+
+There is another complexity that crops up when dealing with small logs. The
+amortized runtime cost of garbage collection is not only dependent on its
+one time cost (_O(n²)_ for littlefs), but also depends on how often
+garbage collection occurs.
+
+Consider two extremes:
+
+1. Log is empty, garbage collection occurs once every _n_ updates
+2. Log is full, garbage collection occurs **every** update
+
+Clearly we need to be more aggressive than waiting for our metadata pair to
+be full. As the metadata pair approaches fullness the frequency of compactions
+grows very rapidly.
+
+Looking at the problem generically, consider a log with ![n] bytes for each
+entry, ![d] dynamic entries (entries that are outdated during garbage
+collection), and ![s] static entries (entries that need to be copied during
+garbage collection). If we look at the amortized runtime complexity of updating
+this log we get this formula:
+
+![cost = n + n (s / d+1)][metadata-formula1]
+
+If we let ![r] be the ratio of static space to the size of our log in bytes, we
+find an alternative representation of the number of static and dynamic entries:
+
+![s = r (size/n)][metadata-formula2]
+
+![d = (1 - r) (size/n)][metadata-formula3]
+
+Substituting these in for ![d] and ![s] gives us a nice formula for the cost of
+updating an entry given how full the log is:
+
+![cost = n + n (r (size/n) / ((1-r) (size/n) + 1))][metadata-formula4]
+
+Assuming 100 byte entries in a 4 KiB log, we can graph this using the entry
+size to find a multiplicative cost:
+
+![Metadata pair update cost graph][metadata-cost-graph]
+
+So at 50% usage, we're seeing an average of 2x cost per update, and at 75%
+usage, we're already at an average of 4x cost per update.
+
+To avoid this exponential growth, instead of waiting for our metadata pair
+to be full, we split the metadata pair once we exceed 50% capacity. We do this
+lazily, waiting until we need to compact before checking if we fit in our 50%
+limit. This limits the overhead of garbage collection to 2x the runtime cost,
+giving us an amortized runtime complexity of _O(1)_.
+
+---
+
+If we look at metadata pairs and linked-lists of metadata pairs at a high
+level, they have fairly nice runtime costs. Assuming _n_ metadata pairs,
+each containing _m_ metadata entries, the _lookup_ cost for a specific
+entry has a worst case runtime complexity of _O(nm)_. For _updating_ a specific
+entry, the worst case complexity is _O(nm²)_, with an amortized complexity
+of only _O(nm)_.
+
+However, splitting at 50% capacity does mean that in the best case our
+metadata pairs will only be 1/2 full. If we include the overhead of the second
+block in our metadata pair, each metadata entry has an effective storage cost
+of 4x the original size. I imagine users would not be happy if they found
+that they can only use a quarter of their original storage. Metadata pairs
+provide a mechanism for performing atomic updates, but we need a separate
+mechanism for storing the bulk of our data.
+
+## CTZ skip-lists
+
+Metadata pairs provide efficient atomic updates but unfortunately have a large
+storage cost. But we can work around this storage cost by only using the
+metadata pairs to store references to more dense, copy-on-write (COW) data
+structures.
+
+[Copy-on-write data structures][wikipedia-cow], also called purely functional
+data structures, are a category of data structures where the underlying
+elements are immutable. Making changes to the data requires creating new
+elements containing a copy of the updated data and replacing any references
+with references to the new elements. Generally, the performance of a COW data
+structure depends on how many old elements can be reused after replacing parts
+of the data.
+
+littlefs has several requirements of its COW structures. They need to be
+efficient to read and write, but most frustrating, they need to be traversable
+with a constant amount of RAM. Notably this rules out
+[B-trees][wikipedia-B-tree], which cannot be traversed with constant RAM, and
+[B+-trees][wikipedia-B+-tree], which are not possible to update with COW
+operations.
+
+---
+
+So, what can we do? First let's consider storing files in a simple COW
+linked-list. Appending a block, which is the basis for writing files, means we
+have to update the last block to point to our new block. This requires a COW
+operation, which means we need to update the second-to-last block, and then the
+third-to-last, and so on until we've copied out the entire file.
+
+```
+A linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |->| data 1 |->| data 2 |->| data 4 |->| data 5 |->| data 6 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+To avoid a full copy during appends, we can store the data backwards. Appending
+blocks just requires adding the new block and no other blocks need to be
+updated. If we update a block in the middle, we still need to copy the
+following blocks, but can reuse any blocks before it. Since most file writes
+are linear, this design gambles that appends are the most common type of data
+update.
+
+```
+A backwards linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 4 |<-| data 5 |<-| data 6 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+However, a backwards linked-list does have a rather glaring problem. Iterating
+over a file _in order_ has a runtime cost of _O(n²)_. A quadratic runtime
+just to read a file! That's awful.
+
+Fortunately we can do better. Instead of a singly linked list, littlefs
+uses a multilayered linked-list often called a
+[skip-list][wikipedia-skip-list]. However, unlike the most common type of
+skip-list, littlefs's skip-lists are strictly deterministic built around some
+interesting properties of the count-trailing-zeros (CTZ) instruction.
+
+The rules CTZ skip-lists follow are that for every _n_th block where _n_
+is divisible by 2_ˣ_, that block contains a pointer to block
+_n_-2_ˣ_. This means that each block contains anywhere from 1 to
+log₂_n_ pointers that skip to different preceding elements of the
+skip-list.
+
+The name comes from heavy use of the [CTZ instruction][wikipedia-ctz], which
+lets us calculate the power-of-two factors efficiently. For a given block _n_,
+that block contains ctz(_n_)+1 pointers.
+
+```
+A backwards CTZ skip-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
+| |<-| |--| |<-| |--| | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The additional pointers let us navigate the data-structure on disk much more
+efficiently than in a singly linked list.
+
+Consider a path from data block 5 to data block 1. You can see how data block 3
+was completely skipped:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 |<-| data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | |<-| |--| | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The path to data block 0 is even faster, requiring only two jumps:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 | | data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | | | | | | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+We can find the runtime complexity by looking at the path to any block from
+the block containing the most pointers. Every step along the path divides
+the search space for the block in half, giving us a runtime of _O(log n)_.
+To get _to_ the block with the most pointers, we can perform the same steps
+backwards, which puts the runtime at _O(2 log n)_ = _O(log n)_. An interesting
+note is that this optimal path occurs naturally if we greedily choose the
+pointer that covers the most distance without passing our target.
+
+So now we have a [COW] data structure that is cheap to append with a runtime
+of _O(1)_, and can be read with a worst case runtime of _O(n log n)_. Given
+that this runtime is also divided by the amount of data we can store in a
+block, this cost is fairly reasonable.
+
+---
+
+This is a new data structure, so we still have several questions. What is the
+storage overhead? Can the number of pointers exceed the size of a block? How do
+we store a CTZ skip-list in our metadata pairs?
+
+To find the storage overhead, we can look at the data structure as multiple
+linked-lists. Each linked-list skips twice as many blocks as the previous,
+or from another perspective, each linked-list uses half as much storage as
+the previous. As we approach infinity, the storage overhead forms a geometric
+series. Solving this tells us that on average our storage overhead is only
+2 pointers per block.
+
+![lim,n->inf((1/n)sum,i,0->n(ctz(i)+1)) = sum,i,0->inf(1/2^i) = 2][ctz-formula1]
+
+Because our file size is limited by the word width we use to store sizes, we can
+also solve for the maximum number of pointers we would ever need to store in a
+block. If we set the overhead of pointers equal to the block size, we get the
+following equation. Note that both a smaller block size (![B][bigB]) and larger
+word width (![w]) result in more storage overhead.
+
+![B = (w/8)ceil(log2(2^w / (B-2w/8)))][ctz-formula2]
+
+Solving the equation for ![B][bigB] gives us the minimum block size for some
+common word widths:
+
+1. 32-bit CTZ skip-list => minimum block size of 104 bytes
+2. 64-bit CTZ skip-list => minimum block size of 448 bytes
+
+littlefs uses a 32-bit word width, so our blocks can only overflow with
+pointers if they are smaller than 104 bytes. This is an easy requirement, as
+in practice, most block sizes start at 512 bytes. As long as our block size
+is larger than 104 bytes, we can avoid the extra logic needed to handle
+pointer overflow.
+
+The last question is how do we store CTZ skip-lists? We need a pointer to the
+head block, the size of the skip-list, the index of the head block, and our
+offset in the head block. But it's worth noting that each size maps to a unique
+index + offset pair. So in theory we can store only a single pointer and size.
+
+However, calculating the index + offset pair from the size is a bit
+complicated. We can start with a summation that loops through all of the blocks
+up until our given size. Let ![B][bigB] be the block size in bytes, ![w] be the
+word width in bits, ![n] be the index of the block in the skip-list, and
+![N][bigN] be the file size in bytes:
+
+![N = sum,i,0->n(B-(w/8)(ctz(i)+1))][ctz-formula3]
+
+This works quite well, but requires _O(n)_ to compute, which brings the full
+runtime of reading a file up to _O(n² log n)_. Fortunately, that summation
+doesn't need to touch the disk, so the practical impact is minimal.
+
+However, despite the integration of a bitwise operation, we can actually reduce
+this equation to a _O(1)_ form. While browsing the amazing resource that is
+the [On-Line Encyclopedia of Integer Sequences (OEIS)][oeis], I managed to find
+[A001511], which matches the iteration of the CTZ instruction,
+and [A005187], which matches its partial summation. Much to my
+surprise, these both result from simple equations, leading us to a rather
+unintuitive property that ties together two seemingly unrelated bitwise
+instructions:
+
+![sum,i,0->n(ctz(i)+1) = 2n-popcount(n)][ctz-formula4]
+
+where:
+
+1. ctz(![x]) = the number of trailing bits that are 0 in ![x]
+2. popcount(![x]) = the number of bits that are 1 in ![x]
+
+Initial tests of this surprising property seem to hold. As ![n] approaches
+infinity, we end up with an average overhead of 2 pointers, which matches our
+assumption from earlier. During iteration, the popcount function seems to
+handle deviations from this average. Of course, just to make sure I wrote a
+quick script that verified this property for all 32-bit integers.
+
+Now we can substitute into our original equation to find a more efficient
+equation for file size:
+
+![N = Bn - (w/8)(2n-popcount(n))][ctz-formula5]
+
+Unfortunately, the popcount function is non-injective, so we can't solve this
+equation for our index. But what we can do is solve for an ![n'] index that
+is greater than ![n] with error bounded by the range of the popcount function.
+We can repeatedly substitute ![n'] into the original equation until the error
+is smaller than our integer resolution. As it turns out, we only need to
+perform this substitution once, which gives us this formula for our index:
+
+![n = floor((N-(w/8)popcount(N/(B-2w/8))) / (B-2w/8))][ctz-formula6]
+
+Now that we have our index ![n], we can just plug it back into the above
+equation to find the offset. We run into a bit of a problem with integer
+overflow, but we can avoid this by rearranging the equation a bit:
+
+![off = N - (B-2w/8)n - (w/8)popcount(n)][ctz-formula7]
+
+Our solution requires quite a bit of math, but computers are very good at math.
+Now we can find both our block index and offset from a size in _O(1)_, letting
+us store CTZ skip-lists with only a pointer and size.
+
+CTZ skip-lists give us a COW data structure that is easily traversable in
+_O(n)_, can be appended in _O(1)_, and can be read in _O(n log n)_. All of
+these operations work in a bounded amount of RAM and require only two words of
+storage overhead per block. In combination with metadata pairs, CTZ skip-lists
+provide power resilience and compact storage of data.
+
+```
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+
+write data to disk, create copies
+=>
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+ ^ ^ ^
+ | | | .--------. .--------. .--------. .--------.
+ | | '----| new |<-| new |<-| new |<-| new |
+ | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 |
+ '------------------| |--| |--| | | |
+ '--------' '--------' '--------' '--------'
+
+commit to metadata pair
+=>
+ .--------.
+ .|new |
+ ||metadata|
+ || |
+ |'--------'
+ '----|---'
+ |
+.--------. .--------. .--------. .--------. |
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 | |
+| |<-| |--| | | | |
+| | | | | | | | |
+'--------' '--------' '--------' '--------' |
+ ^ ^ ^ v
+ | | | .--------. .--------. .--------. .--------.
+ | | '----| new |<-| new |<-| new |<-| new |
+ | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 |
+ '------------------| |--| |--| | | |
+ '--------' '--------' '--------' '--------'
+```
+
+## The block allocator
+
+So we now have the framework for an atomic, wear leveling filesystem. Small two
+block metadata pairs provide atomic updates, while CTZ skip-lists provide
+compact storage of data in COW blocks.
+
+But now we need to look at the [elephant] in the room. Where do all these
+blocks come from?
+
+Deciding which block to use next is the responsibility of the block allocator.
+In filesystem design, block allocation is often a second-class citizen, but in
+a COW filesystem its role becomes much more important as it is needed for
+nearly every write to the filesystem.
+
+Normally, block allocation involves some sort of free list or bitmap stored on
+the filesystem that is updated with free blocks. However, with power
+resilience, keeping these structures consistent becomes difficult. It doesn't
+help that any mistake in updating these structures can result in lost blocks
+that are impossible to recover.
+
+littlefs takes a cautious approach. Instead of trusting a free list on disk,
+littlefs relies on the fact that the filesystem on disk is a mirror image of
+the free blocks on the disk. The block allocator operates much like a garbage
+collector in a scripting language, scanning for unused blocks on demand.
+
+```
+ .----.
+ |root|
+ | |
+ '----'
+ v-------' '-------v
+.----. . . .----.
+| A | . . | B |
+| | . . | |
+'----' . . '----'
+. . . . v--' '------------v---------v
+. . . .----. . .----. .----.
+. . . | C | . | D | | E |
+. . . | | . | | | |
+. . . '----' . '----' '----'
+. . . . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.----.----.
+| A | |root| C | B | | D | | E | |
+| | | | | | | | | | |
+'----'----'----'----'----'----'----'----'----'----'----'----'
+ ^ ^ ^ ^ ^
+ '-------------------'----'-------------------'----'-- free blocks
+```
+
+While this approach may sound complicated, the decision to not maintain a free
+list greatly simplifies the overall design of littlefs. Unlike programming
+languages, there are only a handful of data structures we need to traverse.
+And block deallocation, which occurs nearly as often as block allocation,
+is simply a noop. This "drop it on the floor" strategy greatly reduces the
+complexity of managing on disk data structures, especially when handling
+high-risk error conditions.
+
+---
+
+Our block allocator needs to find free blocks efficiently. You could traverse
+through every block on storage and check each one against our filesystem tree;
+however, the runtime would be abhorrent. We need to somehow collect multiple
+blocks per traversal.
+
+Looking at existing designs, some larger filesystems that use a similar "drop
+it on the floor" strategy store a bitmap of the entire storage in [RAM]. This
+works well because bitmaps are surprisingly compact. We can't use the same
+strategy here, as it violates our constant RAM requirement, but we may be able
+to modify the idea into a workable solution.
+
+```
+.----.----.----.----.----.----.----.----.----.----.----.----.
+| A | |root| C | B | | D | | E | |
+| | | | | | | | | | |
+'----'----'----'----'----'----'----'----'----'----'----'----'
+ 1 0 1 1 1 0 0 1 0 1 0 0
+ \---------------------------+----------------------------/
+ v
+ bitmap: 0xb94 (0b101110010100)
+```
+
+The block allocator in littlefs is a compromise between a disk-sized bitmap and
+a brute force traversal. Instead of a bitmap the size of storage, we keep track
+of a small, fixed-size bitmap called the lookahead buffer. During block
+allocation, we take blocks from the lookahead buffer. If the lookahead buffer
+is empty, we scan the filesystem for more free blocks, populating our lookahead
+buffer. In each scan we use an increasing offset, circling the storage as
+blocks are allocated.
+
+Here's what it might look like to allocate 4 blocks on a decently busy
+filesystem with a 32 bit lookahead and a total of 128 blocks (512 KiB
+of storage if blocks are 4 KiB):
+```
+boot... lookahead:
+ fs blocks: fffff9fffffffffeffffffffffff0000
+scanning... lookahead: fffff9ff
+ fs blocks: fffff9fffffffffeffffffffffff0000
+alloc = 21 lookahead: fffffdff
+ fs blocks: fffffdfffffffffeffffffffffff0000
+alloc = 22 lookahead: ffffffff
+ fs blocks: fffffffffffffffeffffffffffff0000
+scanning... lookahead: fffffffe
+ fs blocks: fffffffffffffffeffffffffffff0000
+alloc = 63 lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffffffff
+ fs blocks: ffffffffffffffffffffffffffff0000
+scanning... lookahead: ffff0000
+ fs blocks: ffffffffffffffffffffffffffff0000
+alloc = 112 lookahead: ffff8000
+ fs blocks: ffffffffffffffffffffffffffff8000
+```
+
+This lookahead approach has a runtime complexity of _O(n²)_ to completely
+scan storage; however, bitmaps are surprisingly compact, and in practice only
+one or two passes are usually needed to find free blocks. Additionally, the
+performance of the allocator can be optimized by adjusting the block size or
+size of the lookahead buffer, trading either write granularity or RAM for
+allocator performance.
+
+## Wear leveling
+
+The block allocator has a secondary role: wear leveling.
+
+Wear leveling is the process of distributing wear across all blocks in the
+storage to prevent the filesystem from experiencing an early death due to
+wear on a single block in the storage.
+
+littlefs has two methods of protecting against wear:
+1. Detection and recovery from bad blocks
+2. Evenly distributing wear across dynamic blocks
+
+---
+
+Recovery from bad blocks doesn't actually have anything to do with the block
+allocator itself. Instead, it relies on the ability of the filesystem to detect
+and evict bad blocks when they occur.
+
+In littlefs, it is fairly straightforward to detect bad blocks at write time.
+All writes must be sourced by some form of data in RAM, so immediately after we
+write to a block, we can read the data back and verify that it was written
+correctly. If we find that the data on disk does not match the copy we have in
+RAM, a write error has occurred and we most likely have a bad block.
+
+Once we detect a bad block, we need to recover from it. In the case of write
+errors, we have a copy of the corrupted data in RAM, so all we need to do is
+evict the bad block, allocate a new, hopefully good block, and repeat the write
+that previously failed.
+
+The actual act of evicting the bad block and replacing it with a new block is
+left up to the filesystem's copy-on-bounded-writes (CObW) data structures. One
+property of CObW data structures is that any block can be replaced during a
+COW operation. The bounded-writes part is normally triggered by a counter, but
+nothing prevents us from triggering a COW operation as soon as we find a bad
+block.
+
+```
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . | C | .
+. . | | .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| | C | B | |
+| | | | | | |
+'----'----'----'----'----'----'----'----'----'----'
+
+update C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . |bad | .
+. . |blck| .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B | |
+| | | |blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+
+oh no! bad block! relocate C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. .
+. . |bad | .
+. . |blck| .
+. . '----' .
+. . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B |bad | |
+| | | |blck| |blck| |
+'----'----'----'----'----'----'----'----'----'----'
+ --------->
+oh no! bad block! relocate C
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | | B |
+| | | |
+'----' '----'
+. . v---' .
+. . .----. . .----.
+. . |bad | . | C' |
+. . |blck| . | |
+. . '----' . '----'
+. . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad | B |bad | C' | |
+| | | |blck| |blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+ -------------->
+successfully relocated C, update B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----.
+| A | |bad |
+| | |blck|
+'----' '----'
+. . v---' .
+. . .----. . .----.
+. . |bad | . | C' |
+. . |blck| . | |
+. . '----' . '----'
+. . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad |bad |bad | C' | |
+| | | |blck|blck|blck| | |
+'----'----'----'----'----'----'----'----'----'----'
+
+oh no! bad block! relocate B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----. .----.
+| A | |bad | |bad |
+| | |blck| |blck|
+'----' '----' '----'
+. . v---' . . .
+. . .----. . .----. .
+. . |bad | . | C' | .
+. . |blck| . | | .
+. . '----' . '----' .
+. . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| |bad |bad |bad | C' |bad |
+| | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+ -------------->
+oh no! bad block! relocate B
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '----------------------v
+.----. .----. .----.
+| A | | B' | |bad |
+| | | | |blck|
+'----' '----' '----'
+. . . | . .---' .
+. . . '--------------v-------------v
+. . . . .----. . .----.
+. . . . |bad | . | C' |
+. . . . |blck| . | |
+. . . . '----' . '----'
+. . . . . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| B' | |bad |bad |bad | C' |bad |
+| | | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+------------> ------------------
+successfully relocated B, update root
+=>
+ .----.
+ |root|
+ | |
+ '----'
+ v--' '--v
+.----. .----.
+| A | | B' |
+| | | |
+'----' '----'
+. . . '---------------------------v
+. . . . .----.
+. . . . | C' |
+. . . . | |
+. . . . '----'
+. . . . . .
+.----.----.----.----.----.----.----.----.----.----.
+| A |root| B' | |bad |bad |bad | C' |bad |
+| | | | |blck|blck|blck| |blck|
+'----'----'----'----'----'----'----'----'----'----'
+```
+
+We may find that the new block is also bad, but hopefully after repeating this
+cycle we'll eventually find a new block where a write succeeds. If we don't,
+that means that all blocks in our storage are bad, and we've reached the end of
+our device's usable life. At this point, littlefs will return an "out of space"
+error. This is technically true, as there are no more good blocks, but as an
+added benefit it also matches the error condition expected by users of
+dynamically sized data.
+
+---
+
+Read errors, on the other hand, are quite a bit more complicated. We don't have
+a copy of the data lingering around in RAM, so we need a way to reconstruct the
+original data even after it has been corrupted. One such mechanism for this is
+[error-correction-codes (ECC)][wikipedia-ecc].
+
+ECC is an extension to the idea of a checksum. Where a checksum such as CRC can
+detect that an error has occurred in the data, ECC can detect and actually
+correct some amount of errors. However, there is a limit to how many errors ECC
+can detect: the [Hamming bound][wikipedia-hamming-bound]. As the number of
+errors approaches the Hamming bound, we may still be able to detect errors, but
+can no longer fix the data. If we've reached this point the block is
+unrecoverable.
+
+littlefs by itself does **not** provide ECC. The block nature and relatively
+large footprint of ECC does not work well with the dynamically sized data of
+filesystems, correcting errors without RAM is complicated, and ECC fits better
+with the geometry of block devices. In fact, several NOR flash chips have extra
+storage intended for ECC, and many NAND chips can even calculate ECC on the
+chip itself.
+
+In littlefs, ECC is entirely optional. Read errors can instead be prevented
+proactively by wear leveling. But it's important to note that ECC can be used
+at the block device level to modestly extend the life of a device. littlefs
+respects any errors reported by the block device, allowing a block device to
+provide additional aggressive error detection.
+
+---
+
+To avoid read errors, we need to be proactive, as opposed to reactive as we
+were with write errors.
+
+One way to do this is to detect when the number of errors in a block exceeds
+some threshold, but is still recoverable. With ECC we can do this at write
+time, and treat the error as a write error, evicting the block before fatal
+read errors have a chance to develop.
+
+A different, more generic strategy, is to proactively distribute wear across
+all blocks in the storage, with the hope that no single block fails before the
+rest of storage is approaching the end of its usable life. This is called
+wear leveling.
+
+Generally, wear leveling algorithms fall into one of two categories:
+
+1. [Dynamic wear leveling][wikipedia-dynamic-wear-leveling], where we
+   distribute wear over "dynamic" blocks. This can be accomplished by
+ only considering unused blocks.
+
+2. [Static wear leveling][wikipedia-static-wear-leveling], where we
+ distribute wear over both "dynamic" and "static" blocks. To make this work,
+ we need to consider all blocks, including blocks that already contain data.
+
+As a tradeoff for code size and complexity, littlefs (currently) only provides
+dynamic wear leveling. This is a best effort solution. Wear is not distributed
+perfectly, but it is distributed among the free blocks and greatly extends the
+life of a device.
+
+On top of this, littlefs uses a statistical wear leveling algorithm. What this
+means is that we don't actively track wear; instead we rely on a uniform
+distribution of wear across storage to approximate a dynamic wear leveling
+algorithm. Despite the long name, this is actually a simplification of dynamic
+wear leveling.
+
+The uniform distribution of wear is left up to the block allocator, which
+creates a uniform distribution in two parts. The easy part is when the device
+is powered, in which case we allocate the blocks linearly, circling the device.
+The harder part is what to do when the device loses power. We can't just
+restart the allocator at the beginning of storage, as this would bias the wear.
+Instead, we start the allocator at a random offset every time we mount the
+filesystem. As long as this random offset is uniform, the combined allocation
+pattern is also a uniform distribution.
+
+![Cumulative wear distribution graph][wear-distribution-graph]
+
+Initially, this approach to wear leveling looks like it creates a difficult
+dependency on a power-independent random number generator, which must return
+different random numbers on each boot. However, the filesystem is in a
+relatively unique situation in that it is sitting on top of a large amount
+of entropy that persists across power loss.
+
+We can actually use the data on disk to directly drive our random number
+generator. In practice, this is implemented by xoring the checksums of each
+metadata pair, which is already calculated to fetch and mount the filesystem.
+
+```
+ .--------. \ probably random
+ .|metadata| | ^
+ || | +-> crc ----------------------> xor
+ || | | ^
+ |'--------' / |
+ '---|--|-' |
+ .-' '-------------------------. |
+ | | |
+ | .--------------> xor ------------> xor
+ | | ^ | ^
+ v crc crc v crc
+ .--------. \ ^ .--------. \ ^ .--------. \ ^
+ .|metadata|-|--|-->|metadata| | | .|metadata| | |
+ || | +--' || | +--' || | +--'
+ || | | || | | || | |
+ |'--------' / |'--------' / |'--------' /
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| data | | data | | data | | data | | data |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+Note that this random number generator is not perfect. It only returns unique
+random numbers when the filesystem is modified. This is exactly what we want
+for distributing wear in the allocator, but means this random number generator
+is not useful for general use.
+
+---
+
+Together, bad block detection and dynamic wear leveling provide a best effort
+solution for avoiding the early death of a filesystem due to wear. Importantly,
+littlefs's wear leveling algorithm provides a key feature: You can increase the
+life of a device simply by increasing the size of storage. And if more
+aggressive wear leveling is desired, you can always combine littlefs with a
+[flash translation layer (FTL)][wikipedia-ftl] to get a small power resilient
+filesystem with static wear leveling.
+
+## Files
+
+Now that we have our building blocks out of the way, we can start looking at
+our filesystem as a whole.
+
+The first step: How do we actually store our files?
+
+We've determined that CTZ skip-lists are pretty good at storing data compactly,
+so following the precedent found in other filesystems we could give each file
+a skip-list stored in a metadata pair that acts as an inode for the file.
+
+
+```
+ .--------.
+ .|metadata|
+ || |
+ || |
+ |'--------'
+ '----|---'
+ v
+.--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |
+| |<-| |--| | | |
+| | | | | | | |
+'--------' '--------' '--------' '--------'
+```
+
+However, this doesn't work well when files are small, which is common for
+embedded systems. Compared to PCs, _all_ data in an embedded system is small.
+
+Consider a small 4-byte file. With a two block metadata-pair and one block for
+the CTZ skip-list, we find ourselves using a full 3 blocks. On most NOR flash
+with 4 KiB blocks, this is 12 KiB of overhead. A ridiculous 3072x increase.
+
+```
+file stored as inode, 4 bytes costs ~12 KiB
+
+ .----------------. \
+.| revision | |
+||----------------| \ |
+|| skiplist ---. +- metadata |
+||----------------| | / 4x8 bytes |
+|| checksum | | 32 bytes |
+||----------------| | |
+|| | | | +- metadata pair
+|| v | | | 2x4 KiB
+|| | | | 8 KiB
+|| | | |
+|| | | |
+|| | | |
+|'----------------' | |
+'----------------' | /
+ .--------'
+ v
+ .----------------. \ \
+ | data | +- data |
+ |----------------| / 4 bytes |
+ | | |
+ | | |
+ | | |
+ | | +- data block
+ | | | 4 KiB
+ | | |
+ | | |
+ | | |
+ | | |
+ | | |
+ '----------------' /
+```
+
+We can make several improvements. First, instead of giving each file its own
+metadata pair, we can store multiple files in a single metadata pair. One way
+to do this is to directly associate a directory with a metadata pair (or a
+linked list of metadata pairs). This makes it easy for multiple files to share
+the directory's metadata pair for logging and reduces the collective storage
+overhead.
+
+The strict binding of metadata pairs and directories also gives users
+direct control over storage utilization depending on how they organize their
+directories.
+
+```
+multiple files stored in metadata pair, 4 bytes costs ~4 KiB
+
+ .----------------.
+ .| revision |
+ ||----------------|
+ || A name |
+ || A skiplist -----.
+ ||----------------| | \
+ || B name | | +- metadata
+ || B skiplist ---. | | 4x8 bytes
+ ||----------------| | | / 32 bytes
+ || checksum | | |
+ ||----------------| | |
+ || | | | |
+ || v | | |
+ |'----------------' | |
+ '----------------' | |
+ .----------------' |
+ v v
+.----------------. .----------------. \ \
+| A data | | B data | +- data |
+| | |----------------| / 4 bytes |
+| | | | |
+| | | | |
+| | | | |
+| | | | + data block
+| | | | | 4 KiB
+| | | | |
+|----------------| | | |
+| | | | |
+| | | | |
+| | | | |
+'----------------' '----------------' /
+```
+
+The second improvement we can make is noticing that for very small files, our
+attempts to use CTZ skip-lists for compact storage backfires. Metadata pairs
+have a ~4x storage cost, so if our file is smaller than 1/4 the block size,
+there's actually no benefit in storing our file outside of our metadata pair.
+
+In this case, we can store the file directly in our directory's metadata pair.
+We call this an inline file, and it allows a directory to store many small
+files quite efficiently. Our previous 4 byte file now only takes up a
+theoretical 16 bytes on disk.
+
+```
+inline files stored in metadata pair, 4 bytes costs ~16 bytes
+
+ .----------------.
+.| revision |
+||----------------|
+|| A name |
+|| A skiplist ---.
+||----------------| | \
+|| B name | | +- data
+|| B data | | | 4x4 bytes
+||----------------| | / 16 bytes
+|| checksum | |
+||----------------| |
+|| | | |
+|| v | |
+|'----------------' |
+'----------------' |
+ .---------'
+ v
+ .----------------.
+ | A data |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ |----------------|
+ | |
+ | |
+ | |
+ '----------------'
+```
+
+Once the file exceeds 1/4 the block size, we switch to a CTZ skip-list. This
+means that our files never use more than 4x storage overhead, decreasing as
+the file grows in size.
+
+![File storage cost graph][file-cost-graph]
+
+## Directories
+
+Now we just need directories to store our files. As mentioned above we want
+a strict binding of directories and metadata pairs, but there are a few
+complications we need to sort out.
+
+On their own, each directory is a linked-list of metadata pairs. This lets us
+store an unlimited number of files in each directory, and we don't need to
+worry about the runtime complexity of unbounded logs. We can store other
+directory pointers in our metadata pairs, which gives us a directory tree, much
+like what you find on other filesystems.
+
+```
+ .--------.
+ .| root |
+ || |
+ || |
+ |'--------'
+ '---|--|-'
+ .-' '-------------------------.
+ v v
+ .--------. .--------. .--------.
+ .| dir A |------->| dir A | .| dir B |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| file C | | file D | | file E | | file F | | file G |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+The main complication is, once again, traversal with a constant amount of
+[RAM]. The directory tree is a tree, and the unfortunate fact is you can't
+traverse a tree with constant RAM.
+
+Fortunately, the elements of our tree are metadata pairs, so unlike CTZ
+skip-lists, we're not limited to strict COW operations. One thing we can do is
+thread a linked-list through our tree, explicitly enabling cheap traversal
+over the entire filesystem.
+
+```
+ .--------.
+ .| root |-.
+ || | |
+ .-------|| |-'
+ | |'--------'
+ | '---|--|-'
+ | .-' '-------------------------.
+ | v v
+ | .--------. .--------. .--------.
+ '->| dir A |------->| dir A |------->| dir B |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| file C | | file D | | file E | | file F | | file G |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+Unfortunately, not sticking to pure COW operations creates some problems. Now,
+whenever we want to manipulate the directory tree, multiple pointers need to be
+updated. If you're familiar with designing atomic data structures this should
+set off a bunch of red flags.
+
+To work around this, our threaded linked-list has a bit of leeway. Instead of
+only containing metadata pairs found in our filesystem, it is allowed to
+contain metadata pairs that have no parent because of a power loss. These are
+called orphaned metadata pairs.
+
+With the possibility of orphans, we can build power loss resilient operations
+that maintain a filesystem tree threaded with a linked-list for traversal.
+
+Adding a directory to our tree:
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |->| dir C |
+ || | || |
+ || | || |
+ |'--------' |'--------'
+ '--------' '--------'
+
+allocate dir B
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |--->| dir C |
+ || | .->| |
+ || | | || |
+ |'--------' | |'--------'
+ '--------' | '--------'
+ |
+ .--------. |
+ .| dir B |-'
+ || |
+ || |
+ |'--------'
+ '--------'
+
+insert dir B into threaded linked-list, creating an orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || orphan!| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+add dir B to parent directory
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Removing a directory:
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+remove dir B from parent directory, creating an orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || orphan!| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+remove dir B from threaded linked-list, returning dir B to free blocks
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------|| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-.
+| v v
+| .--------. .--------.
+'->| dir A |->| dir C |
+ || | || |
+ || | || |
+ |'--------' |'--------'
+ '--------' '--------'
+```
+
+In addition to normal directory tree operations, we can use orphans to evict
+blocks in a metadata pair when the block goes bad or exceeds its allocated
+erases. If we lose power while evicting a metadata block we may end up with
+a situation where the filesystem references the replacement block while the
+threaded linked-list still contains the evicted block. We call this a
+half-orphan.
+
+```
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+
+try to write to dir B
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-----.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |->| dir C |
+ || |-. | | || |
+ || | | | | || |
+ |'--------' | '--------' |'--------'
+ '--------' | v '--------'
+ | .--------.
+ '->| dir B |
+ | bad |
+ | block! |
+ '--------'
+
+oh no! bad block detected, allocate replacement
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-------.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |--->| dir C |
+ || |-. | | .->| |
+ || | | | | | || |
+ |'--------' | '--------' | |'--------'
+ '--------' | v | '--------'
+ | .--------. |
+ '->| dir B | |
+ | bad | |
+ | block! | |
+ '--------' |
+ |
+ .--------. |
+ | dir B |--'
+ | |
+ | |
+ '--------'
+
+insert replacement in threaded linked-list, creating a half-orphan
+=>
+ .--------.
+ .| root |-.
+ || | |
+.----------------|| |-'
+| |'--------'
+| '-|-||-|-'
+| .--------' || '-------.
+| v |v v
+| .--------. .--------. .--------.
+'->| dir A |---->| dir B |--->| dir C |
+ || |-. | | .->| |
+ || | | | | | || |
+ |'--------' | '--------' | |'--------'
+ '--------' | v | '--------'
+ | .--------. |
+ | | dir B | |
+ | | bad | |
+ | | block! | |
+ | '--------' |
+ | |
+ | .--------. |
+ '->| dir B |--'
+ | half |
+ | orphan!|
+ '--------'
+
+fix reference in parent directory
+=>
+ .--------.
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Finding orphans and half-orphans is expensive, requiring an _O(n²)_
+comparison of every metadata pair with every directory entry. But the tradeoff
+is a power resilient filesystem that works with only a bounded amount of RAM.
+Fortunately, we only need to check for orphans on the first allocation after
+boot, and a read-only littlefs can ignore the threaded linked-list entirely.
+
+If we only had some sort of global state, then we could also store a flag and
+avoid searching for orphans unless we knew we were specifically interrupted
+while manipulating the directory tree (foreshadowing!).
+
+## The move problem
+
+We have one last challenge: the move problem. Phrasing the problem is simple:
+
+How do you atomically move a file between two directories?
+
+In littlefs we can atomically commit to directories, but we can't create
+an atomic commit that spans multiple directories. The filesystem must go
+through a minimum of two distinct states to complete a move.
+
+To make matters worse, file moves are a common form of synchronization for
+filesystems. As a filesystem designed for power-loss, it's important we get
+atomic moves right.
+
+So what can we do?
+
+- We definitely can't just let power-loss result in duplicated or lost files.
+ This could easily break users' code and would only reveal itself in extreme
+ cases. We were only able to be lazy about the threaded linked-list because
+ it isn't user facing and we can handle the corner cases internally.
+
+- Some filesystems propagate COW operations up the tree until a common parent
+ is found. Unfortunately this interacts poorly with our threaded tree and
+ brings back the issue of upward propagation of wear.
+
+- In a previous version of littlefs we tried to solve this problem by going
+ back and forth between the source and destination, marking and unmarking the
+ file as moving in order to make the move atomic from the user perspective.
+ This worked, but not well. Finding failed moves was expensive and required
+ a unique identifier for each file.
+
+In the end, solving the move problem required creating a new mechanism for
+sharing knowledge between multiple metadata pairs. In littlefs this led to the
+introduction of a mechanism called "global state".
+
+---
+
+Global state is a small set of state that can be updated from _any_ metadata
+pair. Combining global state with metadata pairs' ability to update multiple
+entries in one commit gives us a powerful tool for crafting complex atomic
+operations.
+
+How does global state work?
+
+Global state exists as a set of deltas that are distributed across the metadata
+pairs in the filesystem. The actual global state can be built out of these
+deltas by xoring together all of the deltas in the filesystem.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate 0x12
+```
+
+To update the global state from a metadata pair, we take the global state we
+know and xor it with both our changes and any existing delta in the metadata
+pair. Committing this new delta to the metadata pair commits the changes to
+the filesystem's global state.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '--|---|-' '----|---'
+ v v | v
+ 0x00 --> xor ----------------> xor -|------> xor --> gstate = 0x12
+ | |
+ | |
+change gstate to 0xab --> xor <------------|--------------------------'
+=> | v
+ '------------> xor
+ |
+ v
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0x46 | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate = 0xab
+```
+
+To make this efficient, we always keep a copy of the global state in RAM. We
+only need to iterate over our metadata pairs and build the global state when
+the filesystem is mounted.
+
+You may have noticed that global state is very expensive. We keep a copy in
+RAM and a delta in an unbounded number of metadata pairs. Even if we reset
+the global state to its initial value, we can't easily clean up the deltas on
+disk. For this reason, it's very important that we keep the size of global
+state bounded and extremely small. But, even with a strict budget, global
+state is incredibly valuable.
+
+---
+
+Now we can solve the move problem. We can create global state describing our
+move atomically with the creation of the new file, and we can clear this move
+state atomically with the removal of the old file.
+
+```
+ .--------. gstate = no move
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '--------'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+begin move, add reference in dir C, change gstate to have move
+=>
+ .--------. gstate = moving file D in dir A (m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || | || | || gdelta |
+ || | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '----|---'
+ | .----------------'
+ v v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+complete move, remove reference in dir A, change gstate to no move
+=>
+ .--------. gstate = no move (m1^~m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || gdelta | || | || gdelta |
+ || =~m1 | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '----|---'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+```
+
+
+If, after building our global state during mount, we find information
+describing an ongoing move, we know we lost power during a move and the file
+is duplicated in both the source and destination directories. If this happens,
+we can resolve the move using the information in the global state to remove
+one of the files.
+
+```
+ .--------. gstate = moving file D in dir A (m1)
+ .| root |-. ^
+ || |------------> xor
+.---------------|| |-' ^
+| |'--------' |
+| '--|-|-|-' |
+| .--------' | '---------. |
+| | | | |
+| | .----------> xor --------> xor
+| v | v ^ v ^
+| .--------. | .--------. | .--------. |
+'->| dir A |-|->| dir B |-|->| dir C | |
+ || |-' || |-' || gdelta |-'
+ || | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '----|---' '--------' '----|---'
+ | .---------------------'
+ v v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+```
+
+We can also move directories the same way we move files. There is the threaded
+linked-list to consider, but leaving the threaded linked-list unchanged works
+fine as the order doesn't really matter.
+
+```
+ .--------. gstate = no move (m1^~m1)
+ .| root |-.
+ || | |
+.-------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .------' | '-------.
+| v v v
+| .--------. .--------. .--------.
+'->| dir A |->| dir B |->| dir C |
+ || gdelta | || | || gdelta |
+ || =~m1 | || | || =m1 |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '----|---'
+ v
+ .--------.
+ | file D |
+ | |
+ | |
+ '--------'
+
+begin move, add reference in dir C, change gstate to have move
+=>
+ .--------. gstate = moving dir B in root (m1^~m1^m2)
+ .| root |-.
+ || | |
+.--------------|| |-'
+| |'--------'
+| '--|-|-|-'
+| .-------' | '----------.
+| v | v
+| .--------. | .--------.
+'->| dir A |-. | .->| dir C |
+ || gdelta | | | | || gdelta |
+ || =~m1 | | | | || =m1^m2 |
+ |'--------' | | | |'--------'
+ '--------' | | | '---|--|-'
+ | | .-------' |
+ | v v | v
+ | .--------. | .--------.
+ '->| dir B |-' | file D |
+ || | | |
+ || | | |
+ |'--------' '--------'
+ '--------'
+
+complete move, remove reference in root, change gstate to no move
+=>
+ .--------. gstate = no move (m1^~m1^m2^~m2)
+ .| root |-.
+ || gdelta | |
+.-----------|| =~m2 |-'
+| |'--------'
+| '---|--|-'
+| .-----' '-----.
+| v v
+| .--------. .--------.
+'->| dir A |-. .->| dir C |
+ || gdelta | | | || gdelta |
+ || =~m1 | | '-|| =m1^m2 |-------.
+ |'--------' | |'--------' |
+ '--------' | '---|--|-' |
+ | .-' '-. |
+ | v v |
+ | .--------. .--------. |
+ '->| dir B |--| file D |-'
+ || | | |
+ || | | |
+ |'--------' '--------'
+ '--------'
+```
+
+Global state gives us a powerful tool we can use to solve the move problem.
+And the result is surprisingly performant, only needing the minimum number
+of states and using the same number of commits as a naive move. Additionally,
+global state gives us a bit of persistent state we can use for some other
+small improvements.
+
+## Conclusion
+
+And that's littlefs, thanks for reading!
+
+
+[wikipedia-flash]: https://en.wikipedia.org/wiki/Flash_memory
+[wikipedia-sna]: https://en.wikipedia.org/wiki/Serial_number_arithmetic
+[wikipedia-crc]: https://en.wikipedia.org/wiki/Cyclic_redundancy_check
+[wikipedia-cow]: https://en.wikipedia.org/wiki/Copy-on-write
+[wikipedia-B-tree]: https://en.wikipedia.org/wiki/B-tree
+[wikipedia-B+-tree]: https://en.wikipedia.org/wiki/B%2B_tree
+[wikipedia-skip-list]: https://en.wikipedia.org/wiki/Skip_list
+[wikipedia-ctz]: https://en.wikipedia.org/wiki/Count_trailing_zeros
+[wikipedia-ecc]: https://en.wikipedia.org/wiki/Error_correction_code
+[wikipedia-hamming-bound]: https://en.wikipedia.org/wiki/Hamming_bound
+[wikipedia-dynamic-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Dynamic_wear_leveling
+[wikipedia-static-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Static_wear_leveling
+[wikipedia-ftl]: https://en.wikipedia.org/wiki/Flash_translation_layer
+
+[oeis]: https://oeis.org
+[A001511]: https://oeis.org/A001511
+[A005187]: https://oeis.org/A005187
+
+[fat]: https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system
+[ext2]: http://e2fsprogs.sourceforge.net/ext2intro.html
+[jffs]: https://www.sourceware.org/jffs2/jffs2-html
+[yaffs]: https://yaffs.net/documents/how-yaffs-works
+[spiffs]: https://github.com/pellepl/spiffs/blob/master/docs/TECH_SPEC
+[ext4]: https://ext4.wiki.kernel.org/index.php/Ext4_Design
+[ntfs]: https://en.wikipedia.org/wiki/NTFS
+[btrfs]: https://btrfs.wiki.kernel.org/index.php/Btrfs_design
+[zfs]: https://en.wikipedia.org/wiki/ZFS
+
+[cow]: https://upload.wikimedia.org/wikipedia/commons/0/0c/Cow_female_black_white.jpg
+[elephant]: https://upload.wikimedia.org/wikipedia/commons/3/37/African_Bush_Elephant.jpg
+[ram]: https://upload.wikimedia.org/wikipedia/commons/9/97/New_Mexico_Bighorn_Sheep.JPG
+
+[metadata-formula1]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Bs%7D%7Bd+1%7D
+[metadata-formula2]: https://latex.codecogs.com/svg.latex?s%20%3D%20r%20%5Cfrac%7Bsize%7D%7Bn%7D
+[metadata-formula3]: https://latex.codecogs.com/svg.latex?d%20%3D%20%281-r%29%20%5Cfrac%7Bsize%7D%7Bn%7D
+[metadata-formula4]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Br%5Cfrac%7Bsize%7D%7Bn%7D%7D%7B%281-r%29%5Cfrac%7Bsize%7D%7Bn%7D+1%7D
+
+[ctz-formula1]: https://latex.codecogs.com/svg.latex?%5Clim_%7Bn%5Cto%5Cinfty%7D%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bi%3D0%7D%5E%7Bn%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%20%5Csum_%7Bi%3D0%7D%5Cfrac%7B1%7D%7B2%5Ei%7D%20%3D%202
+[ctz-formula2]: https://latex.codecogs.com/svg.latex?B%20%3D%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%5Clceil%5Clog_2%5Cleft%28%5Cfrac%7B2%5Ew%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%29%5Cright%5Crceil
+[ctz-formula3]: https://latex.codecogs.com/svg.latex?N%20%3D%20%5Csum_i%5En%5Cleft%5BB-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%5Cright%5D
+[ctz-formula4]: https://latex.codecogs.com/svg.latex?%5Csum_i%5En%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%202n-%5Ctext%7Bpopcount%7D%28n%29
+[ctz-formula5]: https://latex.codecogs.com/svg.latex?N%20%3D%20Bn%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%282n-%5Ctext%7Bpopcount%7D%28n%29%5Cright%29
+[ctz-formula6]: https://latex.codecogs.com/svg.latex?n%20%3D%20%5Cleft%5Clfloor%5Cfrac%7BN-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bpopcount%7D%5Cleft%28%5Cfrac%7BN%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D-1%5Cright%29+2%5Cright%29%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%5Crfloor
+[ctz-formula7]: https://latex.codecogs.com/svg.latex?%5Cmathit%7Boff%7D%20%3D%20N%20-%20%5Cleft%28B-2%5Cfrac%7Bw%7D%7B8%7D%5Cright%29n%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Ctext%7Bpopcount%7D%28n%29
+
+[bigB]: https://latex.codecogs.com/svg.latex?B
+[d]: https://latex.codecogs.com/svg.latex?d
+[m]: https://latex.codecogs.com/svg.latex?m
+[bigN]: https://latex.codecogs.com/svg.latex?N
+[n]: https://latex.codecogs.com/svg.latex?n
+[n']: https://latex.codecogs.com/svg.latex?n%27
+[r]: https://latex.codecogs.com/svg.latex?r
+[s]: https://latex.codecogs.com/svg.latex?s
+[w]: https://latex.codecogs.com/svg.latex?w
+[x]: https://latex.codecogs.com/svg.latex?x
+
+[metadata-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/metadata-cost.svg?sanitize=true
+[wear-distribution-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/wear-distribution.svg?sanitize=true
+[file-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/file-cost.svg?sanitize=true
diff --git a/components/fs/littlefs/littlefs/LICENSE.md b/components/fs/littlefs/littlefs/LICENSE.md
new file mode 100644
index 00000000..e6c3a7ba
--- /dev/null
+++ b/components/fs/littlefs/littlefs/LICENSE.md
@@ -0,0 +1,25 @@
+Copyright (c) 2022, The littlefs authors.
+Copyright (c) 2017, Arm Limited. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+- Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+- Neither the name of ARM nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/components/fs/littlefs/littlefs/Makefile b/components/fs/littlefs/littlefs/Makefile
new file mode 100644
index 00000000..13879336
--- /dev/null
+++ b/components/fs/littlefs/littlefs/Makefile
@@ -0,0 +1,172 @@
+ifdef BUILDDIR
+# make sure BUILDDIR ends with a slash
+override BUILDDIR := $(BUILDDIR)/
+# bit of a hack, but we want to make sure BUILDDIR directory structure
+# is correct before any commands
+$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
+ $(BUILDDIR) \
+ $(BUILDDIR)bd \
+ $(BUILDDIR)tests))
+endif
+
+# overridable target/src/tools/flags/etc
+ifneq ($(wildcard test.c main.c),)
+TARGET ?= $(BUILDDIR)lfs
+else
+TARGET ?= $(BUILDDIR)lfs.a
+endif
+
+
+CC ?= gcc
+AR ?= ar
+SIZE ?= size
+CTAGS ?= ctags
+NM ?= nm
+OBJDUMP ?= objdump
+LCOV ?= lcov
+
+SRC ?= $(wildcard *.c)
+OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
+DEP := $(SRC:%.c=$(BUILDDIR)%.d)
+ASM := $(SRC:%.c=$(BUILDDIR)%.s)
+CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
+
+ifdef DEBUG
+override CFLAGS += -O0
+else
+override CFLAGS += -Os
+endif
+ifdef TRACE
+override CFLAGS += -DLFS_YES_TRACE
+endif
+override CFLAGS += -g3
+override CFLAGS += -I.
+override CFLAGS += -std=c99 -Wall -Wextra -pedantic
+
+ifdef VERBOSE
+override TESTFLAGS += -v
+override CALLSFLAGS += -v
+override CODEFLAGS += -v
+override DATAFLAGS += -v
+override STACKFLAGS += -v
+override STRUCTSFLAGS += -v
+override COVERAGEFLAGS += -v
+endif
+ifdef EXEC
+override TESTFLAGS += --exec="$(EXEC)"
+endif
+ifdef COVERAGE
+override TESTFLAGS += --coverage
+endif
+ifdef BUILDDIR
+override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
+override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
+override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
+override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
+override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
+override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
+override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
+endif
+ifneq ($(NM),nm)
+override CODEFLAGS += --nm-tool="$(NM)"
+override DATAFLAGS += --nm-tool="$(NM)"
+endif
+ifneq ($(OBJDUMP),objdump)
+override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
+endif
+
+
+# commands
+.PHONY: all build
+all build: $(TARGET)
+
+.PHONY: asm
+asm: $(ASM)
+
+.PHONY: size
+size: $(OBJ)
+ $(SIZE) -t $^
+
+.PHONY: tags
+tags:
+ $(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
+
+.PHONY: calls
+calls: $(CGI)
+ ./scripts/calls.py $^ $(CALLSFLAGS)
+
+.PHONY: test
+test:
+ ./scripts/test.py $(TESTFLAGS)
+.SECONDEXPANSION:
+test%: tests/test$$(firstword $$(subst \#, ,%)).toml
+ ./scripts/test.py $@ $(TESTFLAGS)
+
+.PHONY: code
+code: $(OBJ)
+ ./scripts/code.py $^ -S $(CODEFLAGS)
+
+.PHONY: data
+data: $(OBJ)
+ ./scripts/data.py $^ -S $(DATAFLAGS)
+
+.PHONY: stack
+stack: $(CGI)
+ ./scripts/stack.py $^ -S $(STACKFLAGS)
+
+.PHONY: structs
+structs: $(OBJ)
+ ./scripts/structs.py $^ -S $(STRUCTSFLAGS)
+
+.PHONY: coverage
+coverage:
+ ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
+
+.PHONY: summary
+summary: $(BUILDDIR)lfs.csv
+ ./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
+
+
+# rules
+-include $(DEP)
+.SUFFIXES:
+
+$(BUILDDIR)lfs: $(OBJ)
+ $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+$(BUILDDIR)lfs.a: $(OBJ)
+ $(AR) rcs $@ $^
+
+$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
+ ./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
+ ./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
+ ./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
+ ./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
+ $(if $(COVERAGE),\
+ ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
+ -q -m $@ $(COVERAGEFLAGS) -o $@)
+
+$(BUILDDIR)%.o: %.c
+ $(CC) -c -MMD $(CFLAGS) $< -o $@
+
+$(BUILDDIR)%.s: %.c
+ $(CC) -S $(CFLAGS) $< -o $@
+
+# gcc depends on the output file for intermediate file names, so
+# we can't omit the .o output. We also need to serialize with the
+# normal .o rule because otherwise we can end up with multiprocess
+# problems with two instances of gcc modifying the same .o
+$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
+ $(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
+
+# clean everything
+.PHONY: clean
+clean:
+ rm -f $(BUILDDIR)lfs
+ rm -f $(BUILDDIR)lfs.a
+ rm -f $(BUILDDIR)lfs.csv
+ rm -f $(OBJ)
+ rm -f $(CGI)
+ rm -f $(DEP)
+ rm -f $(ASM)
+ rm -f $(BUILDDIR)tests/*.toml.*
diff --git a/components/fs/littlefs/littlefs/README.md b/components/fs/littlefs/littlefs/README.md
new file mode 100644
index 00000000..32b3793f
--- /dev/null
+++ b/components/fs/littlefs/littlefs/README.md
@@ -0,0 +1,258 @@
+## littlefs
+
+A little fail-safe filesystem designed for microcontrollers.
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+**Power-loss resilience** - littlefs is designed to handle random power
+failures. All file operations have strong copy-on-write guarantees and if
+power is lost the filesystem will fall back to the last known good state.
+
+**Dynamic wear leveling** - littlefs is designed with flash in mind, and
+provides wear leveling over dynamic blocks. Additionally, littlefs can
+detect bad blocks and work around them.
+
+**Bounded RAM/ROM** - littlefs is designed to work with a small amount of
+memory. RAM usage is strictly bounded, which means RAM consumption does not
+change as the filesystem grows. The filesystem contains no unbounded
+recursion and dynamic memory is limited to configurable buffers that can be
+provided statically.
+
+## Example
+
+Here's a simple example that updates a file named `boot_count` every time
+main runs. The program can be interrupted at any time without losing track
+of how many times it has been booted and without corrupting the filesystem:
+
+``` c
+#include "lfs.h"
+
+// variables used by the filesystem
+lfs_t lfs;
+lfs_file_t file;
+
+// configuration of the filesystem is provided by this struct
+const struct lfs_config cfg = {
+ // block device operations
+ .read = user_provided_block_device_read,
+ .prog = user_provided_block_device_prog,
+ .erase = user_provided_block_device_erase,
+ .sync = user_provided_block_device_sync,
+
+ // block device configuration
+ .read_size = 16,
+ .prog_size = 16,
+ .block_size = 4096,
+ .block_count = 128,
+ .cache_size = 16,
+ .lookahead_size = 16,
+ .block_cycles = 500,
+};
+
+// entry point
+int main(void) {
+ // mount the filesystem
+ int err = lfs_mount(&lfs, &cfg);
+
+ // reformat if we can't mount the filesystem
+ // this should only happen on the first boot
+ if (err) {
+ lfs_format(&lfs, &cfg);
+ lfs_mount(&lfs, &cfg);
+ }
+
+ // read current count
+ uint32_t boot_count = 0;
+ lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
+ lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));
+
+ // update boot count
+ boot_count += 1;
+ lfs_file_rewind(&lfs, &file);
+ lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
+
+ // remember the storage is not updated until the file is closed successfully
+ lfs_file_close(&lfs, &file);
+
+ // release any resources we were using
+ lfs_unmount(&lfs);
+
+ // print the boot count
+ printf("boot_count: %d\n", boot_count);
+}
+```
+
+## Usage
+
+Detailed documentation (or at least as much detail as is currently available)
+can be found in the comments in [lfs.h](lfs.h).
+
+littlefs takes in a configuration structure that defines how the filesystem
+operates. The configuration struct provides the filesystem with the block
+device operations and dimensions, tweakable parameters that tradeoff memory
+usage for performance, and optional static buffers if the user wants to avoid
+dynamic memory.
+
+The state of the littlefs is stored in the `lfs_t` type which is left up
+to the user to allocate, allowing multiple filesystems to be in use
+simultaneously. With the `lfs_t` and configuration struct, a user can
+format a block device or mount the filesystem.
+
+Once mounted, the littlefs provides a full set of POSIX-like file and
+directory functions, with the deviation that the allocation of filesystem
+structures must be provided by the user.
+
+All POSIX operations, such as remove and rename, are atomic, even in event
+of power-loss. Additionally, file updates are not actually committed to
+the filesystem until sync or close is called on the file.
+
+## Other notes
+
+Littlefs is written in C, and specifically should compile with any compiler
+that conforms to the `C99` standard.
+
+All littlefs calls have the potential to return a negative error code. The
+errors can be either one of those found in the `enum lfs_error` in
+[lfs.h](lfs.h), or an error returned by the user's block device operations.
+
+In the configuration struct, the `prog` and `erase` function provided by the
+user may return a `LFS_ERR_CORRUPT` error if the implementation already can
+detect corrupt blocks. However, the wear leveling does not depend on the return
+code of these functions, instead all data is read back and checked for
+integrity.
+
+If your storage caches writes, make sure that the provided `sync` function
+flushes all the data to memory and ensures that the next read fetches the data
+from memory, otherwise data integrity cannot be guaranteed. If the `write`
+function does not perform caching, and therefore each `read` or `write` call
+hits the memory, the `sync` function can simply return 0.
+
+## Design
+
+At a high level, littlefs is a block based filesystem that uses small logs to
+store metadata and larger copy-on-write (COW) structures to store file data.
+
+In littlefs, these ingredients form a sort of two-layered cake, with the small
+logs (called metadata pairs) providing fast updates to metadata anywhere on
+storage, while the COW structures store file data compactly and without any
+wear amplification cost.
+
+Both of these data structures are built out of blocks, which are fed by a
+common block allocator. By limiting the number of erases allowed on a block
+per allocation, the allocator provides dynamic wear leveling over the entire
+filesystem.
+
+```
+ root
+ .--------.--------.
+ | A'| B'| |
+ | | |-> |
+ | | | |
+ '--------'--------'
+ .----' '--------------.
+ A v B v
+ .--------.--------. .--------.--------.
+ | C'| D'| | | E'|new| |
+ | | |-> | | | E'|-> |
+ | | | | | | | |
+ '--------'--------' '--------'--------'
+ .-' '--. | '------------------.
+ v v .-' v
+.--------. .--------. v .--------.
+| C | | D | .--------. write | new E |
+| | | | | E | ==> | |
+| | | | | | | |
+'--------' '--------' | | '--------'
+ '--------' .-' |
+ .-' '-. .-------------|------'
+ v v v v
+ .--------. .--------. .--------.
+ | F | | G | | new F |
+ | | | | | |
+ | | | | | |
+ '--------' '--------' '--------'
+```
+
+More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
+[SPEC.md](SPEC.md).
+
+- [DESIGN.md](DESIGN.md) - A fully detailed dive into how littlefs works.
+ I would suggest reading it as the tradeoffs at work are quite interesting.
+
+- [SPEC.md](SPEC.md) - The on-disk specification of littlefs with all the
+ nitty-gritty details. May be useful for tooling development.
+
+## Testing
+
+The littlefs comes with a test suite designed to run on a PC using the
+[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
+The tests assume a Linux environment and can be started with make:
+
+``` bash
+make test
+```
+
+## License
+
+The littlefs is provided under the [BSD-3-Clause] license. See
+[LICENSE.md](LICENSE.md) for more information. Contributions to this project
+are accepted under the same license.
+
+Individual files contain the following tag instead of the full license text.
+
+ SPDX-License-Identifier: BSD-3-Clause
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are here available: http://spdx.org/licenses/
+
+## Related projects
+
+- [littlefs-fuse] - A [FUSE] wrapper for littlefs. The project allows you to
+ mount littlefs directly on a Linux machine. Can be useful for debugging
+ littlefs if you have an SD card handy.
+
+- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would
+ want this, but it is handy for demos. You can see it in action
+ [here][littlefs-js-demo].
+
+- [littlefs-python] - A Python wrapper for littlefs. The project allows you
+ to create images of the filesystem on your PC. Check if littlefs will fit
+ your needs, create images for a later download to the target memory or
+ inspect the content of a binary image of the target memory.
+
+- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
+ littlefs images from a host PC. Supports Windows, Mac OS, and Linux.
+
+- [Mbed OS] - The easiest way to get started with littlefs is to jump into Mbed
+ which already has block device drivers for most forms of embedded storage.
+ littlefs is available in Mbed OS as the [LittleFileSystem] class.
+
+- [SPIFFS] - Another excellent embedded filesystem for NOR flash. As a more
+ traditional logging filesystem with full static wear-leveling, SPIFFS will
+ likely outperform littlefs on small memories such as the internal flash on
+ microcontrollers.
+
+- [Dhara] - An interesting NAND flash translation layer designed for small
+ MCUs. It offers static wear-leveling and power-resilience with only a fixed
+ _O(|address|)_ pointer structure stored on each block and in RAM.
+
+
+[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
+[littlefs-fuse]: https://github.com/geky/littlefs-fuse
+[FUSE]: https://github.com/libfuse/libfuse
+[littlefs-js]: https://github.com/geky/littlefs-js
+[littlefs-js-demo]:http://littlefs.geky.net/demo.html
+[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src
+[Lua RTOS]: https://github.com/whitecatboard/Lua-RTOS-ESP32
+[Mbed OS]: https://github.com/armmbed/mbed-os
+[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html
+[SPIFFS]: https://github.com/pellepl/spiffs
+[Dhara]: https://github.com/dlbeer/dhara
+[littlefs-python]: https://pypi.org/project/littlefs-python/
diff --git a/components/fs/littlefs/littlefs/SPEC.md b/components/fs/littlefs/littlefs/SPEC.md
new file mode 100644
index 00000000..3663ea54
--- /dev/null
+++ b/components/fs/littlefs/littlefs/SPEC.md
@@ -0,0 +1,787 @@
+## littlefs technical specification
+
+This is the technical specification of the little filesystem. This document
+covers the technical details of how the littlefs is stored on disk for
+introspection and tooling. This document assumes you are familiar with the
+design of the littlefs, for more info on how littlefs works check
+out [DESIGN.md](DESIGN.md).
+
+```
+ | | | .---._____
+ .-----. | |
+--|o |---| littlefs |
+--| |---| |
+ '-----' '----------'
+ | | |
+```
+
+## Some quick notes
+
+- littlefs is a block-based filesystem. The disk is divided into an array of
+ evenly sized blocks that are used as the logical unit of storage.
+
+- Block pointers are stored in 32 bits, with the special value `0xffffffff`
+ representing a null block address.
+
+- In addition to the logical block size (which usually matches the erase
+ block size), littlefs also uses a program block size and read block size.
+ These determine the alignment of block device operations, but don't need
+ to be consistent for portability.
+
+- By default, all values in littlefs are stored in little-endian byte order.
+
+## Directories / Metadata pairs
+
+Metadata pairs form the backbone of littlefs and provide a system for
+distributed atomic updates. Even the superblock is stored in a metadata pair.
+
+As their name suggests, a metadata pair is stored in two blocks, with one block
+providing a backup during erase cycles in case power is lost. These two blocks
+are not necessarily sequential and may be anywhere on disk, so a "pointer" to a
+metadata pair is stored as two block pointers.
+
+On top of this, each metadata block behaves as an appendable log, containing a
+variable number of commits. Commits can be appended to the metadata log in
+order to update the metadata without requiring an erase cycle. Note that
+successive commits may supersede the metadata in previous commits. Only the
+most recent metadata should be considered valid.
+
+The high-level layout of a metadata block is fairly simple:
+
+```
+ .---------------------------------------.
+.-| revision count | entries | \
+| |-------------------+ | |
+| | | |
+| | | +-- 1st commit
+| | | |
+| | +-------------------| |
+| | | CRC | /
+| |-------------------+-------------------|
+| | entries | \
+| | | |
+| | | +-- 2nd commit
+| | +-------------------+--------------| |
+| | | CRC | padding | /
+| |----+-------------------+--------------|
+| | entries | \
+| | | |
+| | | +-- 3rd commit
+| | +-------------------+---------| |
+| | | CRC | | /
+| |---------+-------------------+ |
+| | unwritten storage | more commits
+| | | |
+| | | v
+| | |
+| | |
+| '---------------------------------------'
+'---------------------------------------'
+```
+
+Each metadata block contains a 32-bit revision count followed by a number of
+commits. Each commit contains a variable number of metadata entries followed
+by a 32-bit CRC.
+
+Note also that entries aren't necessarily word-aligned. This allows us to
+store metadata more compactly, however we can only write to addresses that are
+aligned to our program block size. This means each commit may have padding for
+alignment.
+
+Metadata block fields:
+
+1. **Revision count (32-bits)** - Incremented every erase cycle. If both blocks
+ contain valid commits, only the block with the most recent revision count
+ should be used. Sequence comparison must be used to avoid issues with
+ integer overflow.
+
+2. **CRC (32-bits)** - Detects corruption from power-loss or other write
+ issues. Uses a CRC-32 with a polynomial of `0x04c11db7` initialized
+ with `0xffffffff`.
+
+Entries themselves are stored as a 32-bit tag followed by a variable length
+blob of data. But exactly how these tags are stored is a little bit tricky.
+
+Metadata blocks support both forward and backward iteration. In order to do
+this without duplicating the space for each tag, neighboring entries have their
+tags XORed together, starting with `0xffffffff`.
+
+```
+ Forward iteration Backward iteration
+
+.-------------------. 0xffffffff .-------------------.
+| revision count | | | revision count |
+|-------------------| v |-------------------|
+| tag ~A |---> xor -> tag A | tag ~A |---> xor -> 0xffffffff
+|-------------------| | |-------------------| ^
+| data A | | | data A | |
+| | | | | |
+| | | | | |
+|-------------------| v |-------------------| |
+| tag AxB |---> xor -> tag B | tag AxB |---> xor -> tag A
+|-------------------| | |-------------------| ^
+| data B | | | data B | |
+| | | | | |
+| | | | | |
+|-------------------| v |-------------------| |
+| tag BxC |---> xor -> tag C | tag BxC |---> xor -> tag B
+|-------------------| |-------------------| ^
+| data C | | data C | |
+| | | | tag C
+| | | |
+| | | |
+'-------------------' '-------------------'
+```
+
+One last thing to note before we get into the details around tag encoding. Each
+tag contains a valid bit used to indicate if the tag and containing commit is
+valid. This valid bit is the first bit found in the tag and the commit and can
+be used to tell if we've attempted to write to the remaining space in the
+block.
+
+Here's a more complete example of a metadata block containing 4 entries:
+
+```
+ .---------------------------------------.
+.-| revision count | tag ~A | \
+| |-------------------+-------------------| |
+| | data A | |
+| | | |
+| |-------------------+-------------------| |
+| | tag AxB | data B | <--. |
+| |-------------------+ | | |
+| | | | +-- 1st commit
+| | +-------------------+---------| | |
+| | | tag BxC | | <-.| |
+| |---------+-------------------+ | || |
+| | data C | || |
+| | | || |
+| |-------------------+-------------------| || |
+| | tag CxCRC | CRC | || /
+| |-------------------+-------------------| ||
+| | tag CRCxA' | data A' | || \
+| |-------------------+ | || |
+| | | || |
+| | +-------------------+----| || +-- 2nd commit
+| | | tag CRCxA' | | || |
+| |--------------+-------------------+----| || |
+| | CRC | padding | || /
+| |--------------+----+-------------------| ||
+| | tag CRCxA'' | data A'' | <---. \
+| |-------------------+ | ||| |
+| | | ||| |
+| | +-------------------+---------| ||| |
+| | | tag A''xD | | < ||| |
+| |---------+-------------------+ | |||| +-- 3rd commit
+| | data D | |||| |
+| | +---------| |||| |
+| | | tag Dx| |||| |
+| |---------+-------------------+---------| |||| |
+| |CRC | CRC | | |||| /
+| |---------+-------------------+ | ||||
+| | unwritten storage | |||| more commits
+| | | |||| |
+| | | |||| v
+| | | ||||
+| | | ||||
+| '---------------------------------------' ||||
+'---------------------------------------' |||'- most recent A
+ ||'-- most recent B
+ |'--- most recent C
+ '---- most recent D
+```
+
+## Metadata tags
+
+So in littlefs, 32-bit tags describe every type of metadata. And this means
+_every_ type of metadata, including file entries, directory fields, and
+global state. Even the CRCs used to mark the end of commits get their own tag.
+
+Because of this, the tag format contains some densely packed information. Note
+that there are multiple levels of types which break down into more info:
+
+```
+[---- 32 ----]
+[1|-- 11 --|-- 10 --|-- 10 --]
+ ^. ^ . ^ ^- length
+ |. | . '------------ id
+ |. '-----.------------------ type (type3)
+ '.-----------.------------------ valid bit
+ [-3-|-- 8 --]
+ ^ ^- chunk
+ '------- type (type1)
+```
+
+
+Before we go further, there's one important thing to note. These tags are
+**not** stored in little-endian. Tags stored in commits are actually stored
+in big-endian (and are the only things in littlefs stored in big-endian). This
+little bit of craziness comes from the fact that the valid bit must be the
+first bit in a commit, and when converted to little-endian, the valid bit finds
+itself in byte 4. We could restructure the tag to store the valid bit lower,
+but, because none of the fields are byte-aligned, this would be more
+complicated than just storing the tag in big-endian.
+
+Another thing to note is that both the tags `0x00000000` and `0xffffffff` are
+invalid and can be used for null values.
+
+Metadata tag fields:
+
+1. **Valid bit (1-bit)** - Indicates if the tag is valid.
+
+2. **Type3 (11-bits)** - Type of the tag. This field is broken down further
+ into a 3-bit abstract type and an 8-bit chunk field. Note that the value
+ `0x000` is invalid and not assigned a type.
+
+ 1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
+ 8 categories that facilitate bitmasked lookups.
+
+ 2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
+ abstract types. type1+chunk+id form a unique identifier for each tag in the
+ metadata block.
+
+3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
+ block gets a unique id which is used to associate tags with that file. The
+ special value `0x3ff` is used for any tags that are not associated with a
+ file, such as directory and global metadata.
+
+4. **Length (10-bits)** - Length of the data in bytes. The special value
+ `0x3ff` indicates that this tag has been deleted.
+
+## Metadata types
+
+What follows is an exhaustive list of metadata in littlefs.
+
+---
+#### `0x401` LFS_TYPE_CREATE
+
+Creates a new file with this id. Note that files in a metadata block
+don't necessarily need a create tag. All a create does is move over any
+files using this id. In this sense a create is similar to insertion into
+an imaginary array of files.
+
+The create and delete tags allow littlefs to keep files in a directory
+ordered alphabetically by filename.
+
+---
+#### `0x4ff` LFS_TYPE_DELETE
+
+Deletes the file with this id. An inverse to create, this tag moves over
+any files neighboring this id similar to a deletion from an imaginary
+array of files.
+
+---
+#### `0x0xx` LFS_TYPE_NAME
+
+Associates the id with a file name and file type.
+
+The data contains the file name stored as an ASCII string (may be expanded to
+UTF8 in the future).
+
+The chunk field in this tag indicates an 8-bit file type which can be one of
+the following.
+
+Currently, the name tag must precede any other tags associated with the id and
+can not be reassigned without deleting the file.
+
+Layout of the name tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^ ^- size ^- file name
+ | | | '------ id
+ | | '----------- file type
+ | '-------------- type1 (0x0)
+ '----------------- valid bit
+```
+
+Name fields:
+
+1. **file type (8-bits)** - Type of the file.
+
+2. **file name** - File name stored as an ASCII string.
+
+---
+#### `0x001` LFS_TYPE_REG
+
+Initializes the id + name as a regular file.
+
+How each file is stored depends on its struct tag, which is described below.
+
+---
+#### `0x002` LFS_TYPE_DIR
+
+Initializes the id + name as a directory.
+
+Directories in littlefs are stored on disk as a linked-list of metadata pairs,
+each pair containing any number of files in alphabetical order. A pointer to
+the directory is stored in the struct tag, which is described below.
+
+---
+#### `0x0ff` LFS_TYPE_SUPERBLOCK
+
+Initializes the id as a superblock entry.
+
+The superblock entry is a special entry used to store format-time configuration
+and identify the filesystem.
+
+The name is a bit of a misnomer. While the superblock entry serves the same
+purpose as a superblock found in other filesystems, in littlefs the superblock
+does not get a dedicated block. Instead, the superblock entry is duplicated
+across a linked-list of metadata pairs rooted on the blocks 0 and 1. The last
+metadata pair doubles as the root directory of the filesystem.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| super |->| super |->| super |->| super |->| file B |
+|| block | || block | || block | || block | || file C |
+|| | || | || | || file A | || file D |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '--------' '--------' '--------' '--------'
+
+\----------------+----------------/ \----------+----------/
+ superblock pairs root directory
+```
+
+The filesystem starts with only the root directory. The superblock metadata
+pairs grow every time the root pair is compacted in order to prolong the
+life of the device exponentially.
+
+The contents of the superblock entry are stored in a name tag with the
+superblock type and an inline-struct tag. The name tag contains the magic
+string "littlefs", while the inline-struct tag contains version and
+configuration information.
+
+Layout of the superblock name tag and inline-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^- size (8) ^- magic string ("littlefs")
+ | | '------ id (0)
+ | '------------ type (0x0ff)
+ '----------------- valid bit
+
+ tag data
+[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^- version ^- block size ^- block count
+ | | | | [-- 32 --|-- 32 --|-- 32 --]
+ | | | | [-- 32 --|-- 32 --|-- 32 --]
+ | | | | ^- name max ^- file max ^- attr max
+ | | | '- size (24)
+ | | '------ id (0)
+ | '------------ type (0x201)
+ '----------------- valid bit
+```
+
+Superblock fields:
+
+1. **Magic string (8-bytes)** - Magic string indicating the presence of
+ littlefs on the device. Must be the string "littlefs".
+
+2. **Version (32-bits)** - The version of littlefs at format time. The version
+ is encoded in a 32-bit value with the upper 16-bits containing the major
+ version, and the lower 16-bits containing the minor version.
+
+ This specification describes version 2.0 (`0x00020000`).
+
+3. **Block size (32-bits)** - Size of the logical block size used by the
+ filesystem in bytes.
+
+4. **Block count (32-bits)** - Number of blocks in the filesystem.
+
+5. **Name max (32-bits)** - Maximum size of file names in bytes.
+
+6. **File max (32-bits)** - Maximum size of files in bytes.
+
+7. **Attr max (32-bits)** - Maximum size of file attributes in bytes.
+
+The superblock must always be the first entry (id 0) in a metadata pair as well
+as be the first entry written to the block. This means that the superblock
+entry can be read from a device using offsets alone.
+
+---
+#### `0x2xx` LFS_TYPE_STRUCT
+
+Associates the id with an on-disk data structure.
+
+The exact layout of the data depends on the data structure type stored in the
+chunk field and can be one of the following.
+
+Any type of struct supersedes all other structs associated with the id. For
+example, appending a ctz-struct replaces an inline-struct on the same file.
+
+---
+#### `0x200` LFS_TYPE_DIRSTRUCT
+
+Gives the id a directory data structure.
+
+Directories in littlefs are stored on disk as a linked-list of metadata pairs,
+each pair containing any number of files in alphabetical order.
+
+```
+ |
+ v
+ .--------. .--------. .--------. .--------. .--------. .--------.
+.| file A |->| file D |->| file G |->| file I |->| file J |->| file M |
+|| file B | || file E | || file H | || | || file K | || file N |
+|| file C | || file F | || | || | || file L | || |
+|'--------' |'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The dir-struct tag contains only the pointer to the first metadata-pair in the
+directory. The directory size is not known without traversing the directory.
+
+The pointer to the next metadata-pair in the directory is stored in a tail tag,
+which is described below.
+
+Layout of the dir-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^- size (8) ^- metadata pair
+ | | '------ id
+ | '------------ type (0x200)
+ '----------------- valid bit
+```
+
+Dir-struct fields:
+
+1. **Metadata pair (8-bytes)** - Pointer to the first metadata-pair
+ in the directory.
+
+---
+#### `0x201` LFS_TYPE_INLINESTRUCT
+
+Gives the id an inline data structure.
+
+Inline structs store small files that can fit in the metadata pair. In this
+case, the file data is stored directly in the tag's data area.
+
+Layout of the inline-struct tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1|- 11 -| 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^- size ^- inline data
+ | | '------ id
+ | '------------ type (0x201)
+ '----------------- valid bit
+```
+
+Inline-struct fields:
+
+1. **Inline data** - File data stored directly in the metadata-pair.
+
+---
+#### `0x202` LFS_TYPE_CTZSTRUCT
+
+Gives the id a CTZ skip-list data structure.
+
+CTZ skip-lists store files that can not fit in the metadata pair. These files
+are stored in a skip-list in reverse, with a pointer to the head of the
+skip-list. Note that the head of the skip-list and the file size is enough
+information to read the file.
+
+How exactly CTZ skip-lists work is a bit complicated. A full explanation can be
+found in the [DESIGN.md](DESIGN.md#ctz-skip-lists).
+
+A quick summary: For every _n_th block where _n_ is divisible by
+2_ˣ_, that block contains a pointer to block _n_-2_ˣ_.
+These pointers are stored in increasing order of _x_ in each block of the file
+before the actual data.
+
+```
+ |
+ v
+.--------. .--------. .--------. .--------. .--------. .--------.
+| A |<-| D |<-| G |<-| J |<-| M |<-| P |
+| B |<-| E |--| H |<-| K |--| N | | Q |
+| C |<-| F |--| I |--| L |--| O | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+ block 0 block 1 block 2 block 3 block 4 block 5
+ 1 skip 2 skips 1 skip 3 skips 1 skip
+```
+
+Note that the maximum number of pointers in a block is bounded by the maximum
+file size divided by the block size. With 32 bits for file size, this results
+in a minimum block size of 104 bytes.
+
+Layout of the CTZ-struct tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
+ ^ ^ ^ ^ ^ ^- file size
+ | | | | '-------------------- file head
+ | | | '- size (8)
+ | | '------ id
+ | '------------ type (0x202)
+ '----------------- valid bit
+```
+
+CTZ-struct fields:
+
+1. **File head (32-bits)** - Pointer to the block that is the head of the
+ file's CTZ skip-list.
+
+2. **File size (32-bits)** - Size of the file in bytes.
+
+---
+#### `0x3xx` LFS_TYPE_USERATTR
+
+Attaches a user attribute to an id.
+
+littlefs has a concept of "user attributes". These are small user-provided
+attributes that can be used to store things like timestamps, hashes,
+permissions, etc.
+
+Each user attribute is uniquely identified by an 8-bit type which is stored in
+the chunk field, and the user attribute itself can be found in the tag's data.
+
+There are currently no standard user attributes and a portable littlefs
+implementation should work with any user attributes missing.
+
+Layout of the user-attr tag:
+
+```
+ tag data
+[-- 32 --][--- variable length ---]
+[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
+ ^ ^ ^ ^ ^- size ^- attr data
+ | | | '------ id
+ | | '----------- attr type
+ | '-------------- type1 (0x3)
+ '----------------- valid bit
+```
+
+User-attr fields:
+
+1. **Attr type (8-bits)** - Type of the user attributes.
+
+2. **Attr data** - The data associated with the user attribute.
+
+---
+#### `0x6xx` LFS_TYPE_TAIL
+
+Provides the tail pointer for the metadata pair itself.
+
+The metadata pair's tail pointer is used in littlefs for a linked-list
+containing all metadata pairs. The chunk field contains the type of the tail,
+which indicates if the following metadata pair is a part of the directory
+(hard-tail) or only used to traverse the filesystem (soft-tail).
+
+```
+ .--------.
+ .| dir A |-.
+ ||softtail| |
+.--------| |-'
+| |'--------'
+| '---|--|-'
+| .-' '-------------.
+| v v
+| .--------. .--------. .--------.
+'->| dir B |->| dir B |->| dir C |
+ ||hardtail| ||softtail| || |
+ || | || | || |
+ |'--------' |'--------' |'--------'
+ '--------' '--------' '--------'
+```
+
+Currently any type supersedes any other preceding tails in the metadata pair,
+but this may change if additional metadata pair state is added.
+
+A note about the metadata pair linked-list: Normally, this linked-list contains
+every metadata pair in the filesystem. However, there are some operations that
+can cause this linked-list to become out of sync if a power-loss were to occur.
+When this happens, littlefs sets the "sync" flag in the global state. How
+exactly this flag is stored is described below.
+
+When the sync flag is set:
+
+1. The linked-list may contain an orphaned directory that has been removed in
+ the filesystem.
+2. The linked-list may contain a metadata pair with a bad block that has been
+ replaced in the filesystem.
+
+If the sync flag is set, the threaded linked-list must be checked for these
+errors before it can be used reliably. Note that the threaded linked-list can
+be ignored if littlefs is mounted read-only.
+
+Layout of the tail tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --]
+[1| 3| 8 | 10 | 10 ][--- 64 ---]
+ ^ ^ ^ ^ ^- size (8) ^- metadata pair
+ | | | '------ id
+ | | '---------- tail type
+ | '------------- type1 (0x6)
+ '---------------- valid bit
+```
+
+Tail fields:
+
+1. **Tail type (8-bits)** - Type of the tail pointer.
+
+2. **Metadata pair (8-bytes)** - Pointer to the next metadata-pair.
+
+---
+#### `0x600` LFS_TYPE_SOFTTAIL
+
+Provides a tail pointer that points to the next metadata pair in the
+filesystem.
+
+In this case, the next metadata pair is not a part of our current directory
+and should only be followed when traversing the entire filesystem.
+
+---
+#### `0x601` LFS_TYPE_HARDTAIL
+
+Provides a tail pointer that points to the next metadata pair in the
+directory.
+
+In this case, the next metadata pair belongs to the current directory. Note
+that because directories in littlefs are sorted alphabetically, the next
+metadata pair should only contain filenames greater than any filename in the
+current pair.
+
+---
+#### `0x7xx` LFS_TYPE_GSTATE
+
+Provides delta bits for global state entries.
+
+littlefs has a concept of "global state". This is a small set of state that
+can be updated by a commit to _any_ metadata pair in the filesystem.
+
+The way this works is that the global state is stored as a set of deltas
+distributed across the filesystem such that the global state can be found by
+the xor-sum of these deltas.
+
+```
+ .--------. .--------. .--------. .--------. .--------.
+.| |->| gdelta |->| |->| gdelta |->| gdelta |
+|| | || 0x23 | || | || 0xff | || 0xce |
+|| | || | || | || | || |
+|'--------' |'--------' |'--------' |'--------' |'--------'
+'--------' '----|---' '--------' '----|---' '----|---'
+ v v v
+ 0x00 --> xor ------------------> xor ------> xor --> gstate = 0x12
+```
+
+Note that storing globals this way is very expensive in terms of storage usage,
+so any global state should be kept very small.
+
+The size and format of each piece of global state depends on the type, which
+is stored in the chunk field. Currently, the only global state is move state,
+which is outlined below.
+
+---
+#### `0x7ff` LFS_TYPE_MOVESTATE
+
+Provides delta bits for the global move state.
+
+The move state in littlefs is used to store info about operations that could
+cause the filesystem to go out of sync if the power is lost. The operations
+where this could occur are moves of files between metadata pairs and any
+operation that changes metadata pairs on the threaded linked-list.
+
+In the case of moves, the move state contains a tag + metadata pair describing
+the source of the ongoing move. If this tag is non-zero, that means that power
+was lost during a move, and the file exists in two different locations. If this
+happens, the source of the move should be considered deleted, and the move
+should be completed (the source should be deleted) before any other write
+operations to the filesystem.
+
+In the case of operations to the threaded linked-list, a single "sync" bit is
+used to indicate that a modification is ongoing. If this sync flag is set, the
+threaded linked-list will need to be checked for errors before it can be used
+reliably. The exact cases to check for are described above in the tail tag.
+
+Layout of the move state:
+
+```
+ tag data
+[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
+[1|- 11 -| 10 | 10 ][1|- 11 -| 10 | 10 |--- 64 ---]
+ ^ ^ ^ ^ ^ ^ ^ ^- padding (0) ^- metadata pair
+ | | | | | | '------ move id
+ | | | | | '------------ move type
+ | | | | '----------------- sync bit
+ | | | |
+ | | | '- size (12)
+ | | '------ id (0x3ff)
+ | '------------ type (0x7ff)
+ '----------------- valid bit
+```
+
+Move state fields:
+
+1. **Sync bit (1-bit)** - Indicates if the metadata pair threaded linked-list
+ is in-sync. If set, the threaded linked-list should be checked for errors.
+
+2. **Move type (11-bits)** - Type of move being performed. Must be either
+ `0x000`, indicating no move, or `0x4ff` indicating the source file should
+ be deleted.
+
+3. **Move id (10-bits)** - The file id being moved.
+
+4. **Metadata pair (8-bytes)** - Pointer to the metadata-pair containing
+ the move.
+
+---
+#### `0x5xx` LFS_TYPE_CRC
+
+Last but not least, the CRC tag marks the end of a commit and provides a
+checksum for any commits to the metadata block.
+
+The first 32-bits of the data contain a CRC-32 with a polynomial of
+`0x04c11db7` initialized with `0xffffffff`. This CRC provides a checksum for
+all metadata since the previous CRC tag, including the CRC tag itself. For
+the first commit, this includes the revision count for the metadata block.
+
+However, the size of the data is not limited to 32-bits. The data field may
+be larger to pad the commit to the next program-aligned boundary.
+
+In addition, the CRC tag's chunk field contains a set of flags which can
+change the behaviour of commits. Currently the only flag in use is the lowest
+bit, which determines the expected state of the valid bit for any following
+tags. This is used to guarantee that unwritten storage in a metadata block
+will be detected as invalid.
+
+Layout of the CRC tag:
+
+```
+ tag data
+[-- 32 --][-- 32 --|--- variable length ---]
+[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size * 8 - 32) ---]
+ ^ ^ ^ ^ ^ ^- crc ^- padding
+ | | | | '- size
+ | | | '------ id (0x3ff)
+ | | '----------- valid state
+ | '-------------- type1 (0x5)
+ '----------------- valid bit
+```
+
+CRC fields:
+
+1. **Valid state (1-bit)** - Indicates the expected value of the valid bit for
+ any tags in the next commit.
+
+2. **CRC (32-bits)** - CRC-32 with a polynomial of `0x04c11db7` initialized
+ with `0xffffffff`.
+
+3. **Padding** - Padding to the next program-aligned boundary. No guarantees
+ are made about the contents.
+
+---
diff --git a/components/fs/littlefs/littlefs/bd/lfs_filebd.c b/components/fs/littlefs/littlefs/bd/lfs_filebd.c
new file mode 100644
index 00000000..ee0c31e2
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_filebd.c
@@ -0,0 +1,219 @@
+/*
+ * Block device emulated in a file
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "bd/lfs_filebd.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+// Create a file-backed block device using an explicit filebd configuration.
+//
+// Saves bdcfg in the device state (not copied — must outlive the device)
+// and opens, creating if needed, the backing file at path.
+// Returns 0 on success or a negative errno on failure.
+int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
+        const struct lfs_filebd_config *bdcfg) {
+    LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
+                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+                ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+                "\"%s\", "
+                "%p {.erase_value=%"PRId32"})",
+            (void*)cfg, cfg->context,
+            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+            path, (void*)bdcfg, bdcfg->erase_value);
+    lfs_filebd_t *bd = cfg->context;
+    bd->cfg = bdcfg;
+
+    // open file
+    // O_BINARY is needed on Windows to suppress newline translation
+    #ifdef _WIN32
+    bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666);
+    #else
+    bd->fd = open(path, O_RDWR | O_CREAT, 0666);
+    #endif
+
+    if (bd->fd < 0) {
+        int err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
+        return err;
+    }
+
+    LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0);
+    return 0;
+}
+
+// Create a file block device with the default configuration.
+//
+// Thin wrapper around lfs_filebd_createcfg; erase_value = -1 disables
+// erase simulation. Returns 0 on success or a negative errno on failure.
+int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
+    LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
+                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+                ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+                "\"%s\")",
+            (void*)cfg, cfg->context,
+            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+            path);
+    // static so the pointer stored by createcfg stays valid
+    static const struct lfs_filebd_config defaults = {.erase_value=-1};
+    int err = lfs_filebd_createcfg(cfg, path, &defaults);
+    LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
+    return err;
+}
+
+// Clean up the block device by closing the backing file.
+// Returns 0 on success or a negative errno on failure.
+int lfs_filebd_destroy(const struct lfs_config *cfg) {
+    LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
+    lfs_filebd_t *bd = cfg->context;
+    int err = close(bd->fd);
+    if (err < 0) {
+        err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err);
+        return err;
+    }
+    LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0);
+    return 0;
+}
+
+// Read size bytes at (block, off) from the backing file into buffer.
+//
+// Asserts that off/size are aligned to read_size and that block is in
+// range. Returns 0 on success or a negative errno on failure.
+int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
+        lfs_off_t off, void *buffer, lfs_size_t size) {
+    LFS_FILEBD_TRACE("lfs_filebd_read(%p, "
+                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+            (void*)cfg, block, off, buffer, size);
+    lfs_filebd_t *bd = cfg->context;
+
+    // check if read is valid
+    LFS_ASSERT(off % cfg->read_size == 0);
+    LFS_ASSERT(size % cfg->read_size == 0);
+    LFS_ASSERT(block < cfg->block_count);
+
+    // zero for reproducibility (in case file is truncated)
+    if (bd->cfg->erase_value != -1) {
+        memset(buffer, bd->cfg->erase_value, size);
+    }
+
+    // read
+    off_t res1 = lseek(bd->fd,
+            (off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
+    if (res1 < 0) {
+        int err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
+        return err;
+    }
+
+    // NOTE(review): a short read (past EOF of a truncated file) is not
+    // retried; the tail of buffer keeps the pre-filled erase value, or is
+    // left unchanged when erase_value == -1 — confirm this is intended
+    ssize_t res2 = read(bd->fd, buffer, size);
+    if (res2 < 0) {
+        int err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
+        return err;
+    }
+
+    LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0);
+    return 0;
+}
+
+// Program (write) size bytes at (block, off) in the backing file.
+//
+// Asserts that off/size are aligned to prog_size and that block is in
+// range. When an erase value is configured, first verifies byte-by-byte
+// that the destination still holds the erase value (testing aid only).
+// Returns 0 on success or a negative errno on failure.
+int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
+        lfs_off_t off, const void *buffer, lfs_size_t size) {
+    LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+            (void*)cfg, block, off, buffer, size);
+    lfs_filebd_t *bd = cfg->context;
+
+    // check if write is valid
+    LFS_ASSERT(off % cfg->prog_size == 0);
+    LFS_ASSERT(size % cfg->prog_size == 0);
+    LFS_ASSERT(block < cfg->block_count);
+
+    // check that data was erased? only needed for testing
+    if (bd->cfg->erase_value != -1) {
+        off_t res1 = lseek(bd->fd,
+                (off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
+        if (res1 < 0) {
+            int err = -errno;
+            LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
+            return err;
+        }
+
+        for (lfs_off_t i = 0; i < size; i++) {
+            uint8_t c;
+            ssize_t res2 = read(bd->fd, &c, 1);
+            if (res2 < 0) {
+                int err = -errno;
+                LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
+                return err;
+            }
+
+            // programming un-erased storage is a test failure
+            LFS_ASSERT(c == bd->cfg->erase_value);
+        }
+    }
+
+    // program data
+    off_t res1 = lseek(bd->fd,
+            (off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
+    if (res1 < 0) {
+        int err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
+        return err;
+    }
+
+    ssize_t res2 = write(bd->fd, buffer, size);
+    if (res2 < 0) {
+        int err = -errno;
+        LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
+        return err;
+    }
+
+    LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0);
+    return 0;
+}
+
+// Erase a block.
+//
+// Only fills the block with the configured erase value when erase
+// simulation is enabled; otherwise a no-op (erased state is undefined).
+// The byte-at-a-time write loop is slow but only runs in testing
+// configurations. Returns 0 on success or a negative errno on failure.
+int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
+    LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
+    lfs_filebd_t *bd = cfg->context;
+
+    // check if erase is valid
+    LFS_ASSERT(block < cfg->block_count);
+
+    // erase, only needed for testing
+    if (bd->cfg->erase_value != -1) {
+        off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
+        if (res1 < 0) {
+            int err = -errno;
+            LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
+            return err;
+        }
+
+        for (lfs_off_t i = 0; i < cfg->block_size; i++) {
+            ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
+            if (res2 < 0) {
+                int err = -errno;
+                LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
+                return err;
+            }
+        }
+    }
+
+    LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
+    return 0;
+}
+
+// Flush any buffered data to the underlying storage.
+//
+// On Windows, FlushFileBuffers is used on the OS handle backing the fd;
+// on POSIX, fsync. Returns 0 on success or a negative errno on failure.
+// NOTE(review): FlushFileBuffers does not set errno on failure — the
+// returned -errno may be stale on Windows; confirm against callers' needs.
+int lfs_filebd_sync(const struct lfs_config *cfg) {
+    LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
+    // file sync
+    lfs_filebd_t *bd = cfg->context;
+    #ifdef _WIN32
+    int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1;
+    #else
+    int err = fsync(bd->fd);
+    #endif
+    if (err) {
+        err = -errno;
+        // trace the actual error code, not 0 (was a copy-paste slip that
+        // made failed syncs look successful in traces)
+        LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", err);
+        return err;
+    }
+
+    LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
+    return 0;
+}
diff --git a/components/fs/littlefs/littlefs/bd/lfs_filebd.h b/components/fs/littlefs/littlefs/bd/lfs_filebd.h
new file mode 100644
index 00000000..1a9456c5
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_filebd.h
@@ -0,0 +1,74 @@
+/*
+ * Block device emulated in a file
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_FILEBD_H
+#define LFS_FILEBD_H
+
+#include "lfs.h"
+#include "lfs_util.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Block device specific tracing
+#ifdef LFS_FILEBD_YES_TRACE
+#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
+#else
+#define LFS_FILEBD_TRACE(...)
+#endif
+
+// filebd config (optional)
+struct lfs_filebd_config {
+    // 8-bit erase value to use for simulating erases. -1 does not simulate
+    // erases, which can speed up testing by avoiding all the extra block-device
+    // operations to store the erase value.
+    int32_t erase_value;
+};
+
+// filebd state
+typedef struct lfs_filebd {
+    // File descriptor of the file backing the block device.
+    int fd;
+    // Configuration the device was created with (borrowed, not owned).
+    const struct lfs_filebd_config *cfg;
+} lfs_filebd_t;
+
+
+// Create a file block device using the geometry in lfs_config
+int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
+int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
+ const struct lfs_filebd_config *bdcfg);
+
+// Clean up memory associated with block device
+int lfs_filebd_destroy(const struct lfs_config *cfg);
+
+// Read a block
+int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+// Program a block
+//
+// The block must have previously been erased.
+int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+// Erase a block
+//
+// A block must be erased before being programmed. The
+// state of an erased block is undefined.
+int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block);
+
+// Sync the block device
+int lfs_filebd_sync(const struct lfs_config *cfg);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/components/fs/littlefs/littlefs/bd/lfs_rambd.c b/components/fs/littlefs/littlefs/bd/lfs_rambd.c
new file mode 100644
index 00000000..39bb8150
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_rambd.c
@@ -0,0 +1,143 @@
+/*
+ * Block device emulated in RAM
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "bd/lfs_rambd.h"
+
+// Create a RAM block device with an explicit rambd configuration.
+//
+// Uses bdcfg->buffer when provided, otherwise heap-allocates
+// block_size * block_count bytes. The memory is pre-filled with the erase
+// value (or zero) so runs are reproducible. Returns 0 on success or
+// LFS_ERR_NOMEM if allocation fails.
+int lfs_rambd_createcfg(const struct lfs_config *cfg,
+        const struct lfs_rambd_config *bdcfg) {
+    LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, "
+                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+                ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+                "%p {.erase_value=%"PRId32", .buffer=%p})",
+            (void*)cfg, cfg->context,
+            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+            (void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
+    lfs_rambd_t *bd = cfg->context;
+    bd->cfg = bdcfg;
+
+    // allocate buffer?
+    if (bd->cfg->buffer) {
+        bd->buffer = bd->cfg->buffer;
+    } else {
+        bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count);
+        if (!bd->buffer) {
+            LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM);
+            return LFS_ERR_NOMEM;
+        }
+    }
+
+    // zero for reproducibility?
+    if (bd->cfg->erase_value != -1) {
+        memset(bd->buffer, bd->cfg->erase_value,
+                cfg->block_size * cfg->block_count);
+    } else {
+        memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
+    }
+
+    LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
+    return 0;
+}
+
+// Create a RAM block device with the default configuration.
+//
+// Thin wrapper around lfs_rambd_createcfg; erase_value = -1 disables
+// erase simulation and no user buffer is supplied (heap-allocated).
+int lfs_rambd_create(const struct lfs_config *cfg) {
+    LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
+                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+                ".block_size=%"PRIu32", .block_count=%"PRIu32"})",
+            (void*)cfg, cfg->context,
+            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
+    // static so the pointer stored by createcfg stays valid
+    static const struct lfs_rambd_config defaults = {.erase_value=-1};
+    int err = lfs_rambd_createcfg(cfg, &defaults);
+    LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
+    return err;
+}
+
+// Clean up memory owned by the block device.
+//
+// The buffer is only freed when createcfg heap-allocated it, i.e. when no
+// user-supplied buffer was configured. Always returns 0.
+int lfs_rambd_destroy(const struct lfs_config *cfg) {
+    LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
+    // clean up memory
+    lfs_rambd_t *bd = cfg->context;
+    if (!bd->cfg->buffer) {
+        lfs_free(bd->buffer);
+    }
+    LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0);
+    return 0;
+}
+
+// Read size bytes at (block, off) from the RAM buffer into buffer.
+//
+// Asserts that off/size are aligned to read_size and that block is in
+// range. Always returns 0.
+int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
+        lfs_off_t off, void *buffer, lfs_size_t size) {
+    LFS_RAMBD_TRACE("lfs_rambd_read(%p, "
+                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+            (void*)cfg, block, off, buffer, size);
+    lfs_rambd_t *bd = cfg->context;
+
+    // check if read is valid
+    LFS_ASSERT(off % cfg->read_size == 0);
+    LFS_ASSERT(size % cfg->read_size == 0);
+    LFS_ASSERT(block < cfg->block_count);
+
+    // read data
+    memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
+
+    LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
+    return 0;
+}
+
+// Program (write) size bytes at (block, off) in the RAM buffer.
+//
+// Asserts that off/size are aligned to prog_size and that block is in
+// range. When an erase value is configured, asserts the destination was
+// erased first (testing aid only). Always returns 0.
+int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
+        lfs_off_t off, const void *buffer, lfs_size_t size) {
+    LFS_RAMBD_TRACE("lfs_rambd_prog(%p, "
+                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+            (void*)cfg, block, off, buffer, size);
+    lfs_rambd_t *bd = cfg->context;
+
+    // check if write is valid
+    LFS_ASSERT(off % cfg->prog_size == 0);
+    LFS_ASSERT(size % cfg->prog_size == 0);
+    LFS_ASSERT(block < cfg->block_count);
+
+    // check that data was erased? only needed for testing
+    if (bd->cfg->erase_value != -1) {
+        for (lfs_off_t i = 0; i < size; i++) {
+            LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
+                    bd->cfg->erase_value);
+        }
+    }
+
+    // program data
+    memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
+
+    LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
+    return 0;
+}
+
+// Erase a block.
+//
+// Only fills the block with the configured erase value when erase
+// simulation is enabled; otherwise a no-op (erased state is undefined).
+// Always returns 0.
+int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
+    LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
+    lfs_rambd_t *bd = cfg->context;
+
+    // check if erase is valid
+    LFS_ASSERT(block < cfg->block_count);
+
+    // erase, only needed for testing
+    if (bd->cfg->erase_value != -1) {
+        memset(&bd->buffer[block*cfg->block_size],
+                bd->cfg->erase_value, cfg->block_size);
+    }
+
+    LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
+    return 0;
+}
+
+// Sync the block device; a no-op for RAM storage. Always returns 0.
+int lfs_rambd_sync(const struct lfs_config *cfg) {
+    LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
+    // sync does nothing because we aren't backed by anything real
+    (void)cfg;
+    LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
+    return 0;
+}
diff --git a/components/fs/littlefs/littlefs/bd/lfs_rambd.h b/components/fs/littlefs/littlefs/bd/lfs_rambd.h
new file mode 100644
index 00000000..3a70bc6e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_rambd.h
@@ -0,0 +1,76 @@
+/*
+ * Block device emulated in RAM
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_RAMBD_H
+#define LFS_RAMBD_H
+
+#include "lfs.h"
+#include "lfs_util.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Block device specific tracing
+#ifdef LFS_RAMBD_YES_TRACE
+#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
+#else
+#define LFS_RAMBD_TRACE(...)
+#endif
+
+// rambd config (optional)
+struct lfs_rambd_config {
+    // 8-bit erase value to simulate erasing with. -1 indicates no erase
+    // occurs, which is still a valid block device
+    int32_t erase_value;
+
+    // Optional statically allocated buffer for the block device.
+    // When NULL, createcfg heap-allocates and destroy frees the buffer.
+    void *buffer;
+};
+
+// rambd state
+typedef struct lfs_rambd {
+    // Backing memory: either cfg->buffer or heap-allocated by createcfg.
+    uint8_t *buffer;
+    // Configuration the device was created with (borrowed, not owned).
+    const struct lfs_rambd_config *cfg;
+} lfs_rambd_t;
+
+
+// Create a RAM block device using the geometry in lfs_config
+int lfs_rambd_create(const struct lfs_config *cfg);
+int lfs_rambd_createcfg(const struct lfs_config *cfg,
+ const struct lfs_rambd_config *bdcfg);
+
+// Clean up memory associated with block device
+int lfs_rambd_destroy(const struct lfs_config *cfg);
+
+// Read a block
+int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+// Program a block
+//
+// The block must have previously been erased.
+int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+// Erase a block
+//
+// A block must be erased before being programmed. The
+// state of an erased block is undefined.
+int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block);
+
+// Sync the block device
+int lfs_rambd_sync(const struct lfs_config *cfg);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/components/fs/littlefs/littlefs/bd/lfs_testbd.c b/components/fs/littlefs/littlefs/bd/lfs_testbd.c
new file mode 100644
index 00000000..1f0877d4
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_testbd.c
@@ -0,0 +1,303 @@
+/*
+ * Testing block device, wraps filebd and rambd while providing a bunch
+ * of hooks for testing littlefs in various conditions.
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "bd/lfs_testbd.h"
+
+#include <stdlib.h>
+
+
+int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
+ const struct lfs_testbd_config *bdcfg) {
+ LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+ "\"%s\", "
+ "%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
+ ".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
+ ".buffer=%p, .wear_buffer=%p})",
+ (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
+ bdcfg->badblock_behavior, bdcfg->power_cycles,
+ bdcfg->buffer, bdcfg->wear_buffer);
+ lfs_testbd_t *bd = cfg->context;
+ bd->cfg = bdcfg;
+
+ // setup testing things
+ bd->persist = path;
+ bd->power_cycles = bd->cfg->power_cycles;
+
+ if (bd->cfg->erase_cycles) {
+ if (bd->cfg->wear_buffer) {
+ bd->wear = bd->cfg->wear_buffer;
+ } else {
+ bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count);
+ if (!bd->wear) {
+ LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
+ return LFS_ERR_NOMEM;
+ }
+ }
+
+ memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count);
+ }
+
+ // create underlying block device
+ if (bd->persist) {
+ bd->u.file.cfg = (struct lfs_filebd_config){
+ .erase_value = bd->cfg->erase_value,
+ };
+ int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg);
+ LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
+ return err;
+ } else {
+ bd->u.ram.cfg = (struct lfs_rambd_config){
+ .erase_value = bd->cfg->erase_value,
+ .buffer = bd->cfg->buffer,
+ };
+ int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg);
+ LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
+ return err;
+ }
+}
+
+int lfs_testbd_create(const struct lfs_config *cfg, const char *path) {
+ LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
+ "\"%s\")",
+ (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ path);
+ static const struct lfs_testbd_config defaults = {.erase_value=-1};
+ int err = lfs_testbd_createcfg(cfg, path, &defaults);
+ LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
+ return err;
+}
+
+int lfs_testbd_destroy(const struct lfs_config *cfg) {
+ LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg);
+ lfs_testbd_t *bd = cfg->context;
+ if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) {
+ lfs_free(bd->wear);
+ }
+
+ if (bd->persist) {
+ int err = lfs_filebd_destroy(cfg);
+ LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
+ return err;
+ } else {
+ int err = lfs_rambd_destroy(cfg);
+ LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
+ return err;
+ }
+}
+
+/// Internal mapping to block devices ///
+static int lfs_testbd_rawread(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ lfs_testbd_t *bd = cfg->context;
+ if (bd->persist) {
+ return lfs_filebd_read(cfg, block, off, buffer, size);
+ } else {
+ return lfs_rambd_read(cfg, block, off, buffer, size);
+ }
+}
+
+static int lfs_testbd_rawprog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size) {
+ lfs_testbd_t *bd = cfg->context;
+ if (bd->persist) {
+ return lfs_filebd_prog(cfg, block, off, buffer, size);
+ } else {
+ return lfs_rambd_prog(cfg, block, off, buffer, size);
+ }
+}
+
+static int lfs_testbd_rawerase(const struct lfs_config *cfg,
+ lfs_block_t block) {
+ lfs_testbd_t *bd = cfg->context;
+ if (bd->persist) {
+ return lfs_filebd_erase(cfg, block);
+ } else {
+ return lfs_rambd_erase(cfg, block);
+ }
+}
+
+static int lfs_testbd_rawsync(const struct lfs_config *cfg) {
+ lfs_testbd_t *bd = cfg->context;
+ if (bd->persist) {
+ return lfs_filebd_sync(cfg);
+ } else {
+ return lfs_rambd_sync(cfg);
+ }
+}
+
+/// block device API ///
+int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ LFS_TESTBD_TRACE("lfs_testbd_read(%p, "
+ "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+ (void*)cfg, block, off, buffer, size);
+ lfs_testbd_t *bd = cfg->context;
+
+ // check if read is valid
+ LFS_ASSERT(off % cfg->read_size == 0);
+ LFS_ASSERT(size % cfg->read_size == 0);
+ LFS_ASSERT(block < cfg->block_count);
+
+ // block bad?
+ if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles &&
+ bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_READERROR) {
+ LFS_TESTBD_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ }
+
+ // read
+ int err = lfs_testbd_rawread(cfg, block, off, buffer, size);
+ LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err);
+ return err;
+}
+
+int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size) {
+ LFS_TESTBD_TRACE("lfs_testbd_prog(%p, "
+ "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
+ (void*)cfg, block, off, buffer, size);
+ lfs_testbd_t *bd = cfg->context;
+
+ // check if write is valid
+ LFS_ASSERT(off % cfg->prog_size == 0);
+ LFS_ASSERT(size % cfg->prog_size == 0);
+ LFS_ASSERT(block < cfg->block_count);
+
+ // block bad?
+ if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) {
+ if (bd->cfg->badblock_behavior ==
+ LFS_TESTBD_BADBLOCK_PROGERROR) {
+ LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ } else if (bd->cfg->badblock_behavior ==
+ LFS_TESTBD_BADBLOCK_PROGNOOP ||
+ bd->cfg->badblock_behavior ==
+ LFS_TESTBD_BADBLOCK_ERASENOOP) {
+ LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
+ return 0;
+ }
+ }
+
+ // prog
+ int err = lfs_testbd_rawprog(cfg, block, off, buffer, size);
+ if (err) {
+ LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
+ return err;
+ }
+
+ // lose power?
+ if (bd->power_cycles > 0) {
+ bd->power_cycles -= 1;
+ if (bd->power_cycles == 0) {
+ // sync to make sure we persist the last changes
+ LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
+ // simulate power loss
+ exit(33);
+ }
+ }
+
+ LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
+ return 0;
+}
+
+int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
+ LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
+ lfs_testbd_t *bd = cfg->context;
+
+ // check if erase is valid
+ LFS_ASSERT(block < cfg->block_count);
+
+ // block bad?
+ if (bd->cfg->erase_cycles) {
+ if (bd->wear[block] >= bd->cfg->erase_cycles) {
+ if (bd->cfg->badblock_behavior ==
+ LFS_TESTBD_BADBLOCK_ERASEERROR) {
+ LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT);
+ return LFS_ERR_CORRUPT;
+ } else if (bd->cfg->badblock_behavior ==
+ LFS_TESTBD_BADBLOCK_ERASENOOP) {
+ LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0);
+ return 0;
+ }
+ } else {
+ // mark wear
+ bd->wear[block] += 1;
+ }
+ }
+
+ // erase
+ int err = lfs_testbd_rawerase(cfg, block);
+ if (err) {
+ LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err);
+ return err;
+ }
+
+ // lose power?
+ if (bd->power_cycles > 0) {
+ bd->power_cycles -= 1;
+ if (bd->power_cycles == 0) {
+ // sync to make sure we persist the last changes
+ LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
+ // simulate power loss
+ exit(33);
+ }
+ }
+
+ LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
+ return 0;
+}
+
+int lfs_testbd_sync(const struct lfs_config *cfg) {
+ LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg);
+ int err = lfs_testbd_rawsync(cfg);
+ LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", err);
+ return err;
+}
+
+
+/// simulated wear operations ///
+lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
+ lfs_block_t block) {
+ LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block);
+ lfs_testbd_t *bd = cfg->context;
+
+ // check if block is valid
+ LFS_ASSERT(bd->cfg->erase_cycles);
+ LFS_ASSERT(block < cfg->block_count);
+
+ LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]);
+ return bd->wear[block];
+}
+
+int lfs_testbd_setwear(const struct lfs_config *cfg,
+ lfs_block_t block, lfs_testbd_wear_t wear) {
+ LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block);
+ lfs_testbd_t *bd = cfg->context;
+
+ // check if block is valid
+ LFS_ASSERT(bd->cfg->erase_cycles);
+ LFS_ASSERT(block < cfg->block_count);
+
+ bd->wear[block] = wear;
+
+ LFS_TESTBD_TRACE("lfs_testbd_setwear -> %d", 0);
+ return 0;
+}
diff --git a/components/fs/littlefs/littlefs/bd/lfs_testbd.h b/components/fs/littlefs/littlefs/bd/lfs_testbd.h
new file mode 100644
index 00000000..61679e5e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/bd/lfs_testbd.h
@@ -0,0 +1,142 @@
+/*
+ * Testing block device, wraps filebd and rambd while providing a bunch
+ * of hooks for testing littlefs in various conditions.
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_TESTBD_H
+#define LFS_TESTBD_H
+
+#include "lfs.h"
+#include "lfs_util.h"
+#include "bd/lfs_rambd.h"
+#include "bd/lfs_filebd.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Block device specific tracing
+#ifdef LFS_TESTBD_YES_TRACE
+#define LFS_TESTBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
+#else
+#define LFS_TESTBD_TRACE(...)
+#endif
+
+// Mode determining how "bad blocks" behave during testing. This simulates
+// some real-world circumstances such as progs not sticking (prog-noop),
+// a readonly disk (erase-noop), and ECC failures (read-error).
+//
+// Not that read-noop is not allowed. Read _must_ return a consistent (but
+// may be arbitrary) value on every read.
+enum lfs_testbd_badblock_behavior {
+ LFS_TESTBD_BADBLOCK_PROGERROR,
+ LFS_TESTBD_BADBLOCK_ERASEERROR,
+ LFS_TESTBD_BADBLOCK_READERROR,
+ LFS_TESTBD_BADBLOCK_PROGNOOP,
+ LFS_TESTBD_BADBLOCK_ERASENOOP,
+};
+
+// Type for measuring wear
+typedef uint32_t lfs_testbd_wear_t;
+typedef int32_t lfs_testbd_swear_t;
+
+// testbd config, this is required for testing
+struct lfs_testbd_config {
+ // 8-bit erase value to use for simulating erases. -1 does not simulate
+ // erases, which can speed up testing by avoiding all the extra block-device
+ // operations to store the erase value.
+ int32_t erase_value;
+
+ // Number of erase cycles before a block becomes "bad". The exact behavior
+ // of bad blocks is controlled by the badblock_mode.
+ uint32_t erase_cycles;
+
+ // The mode determining how bad blocks fail
+ uint8_t badblock_behavior;
+
+ // Number of write operations (erase/prog) before forcefully killing
+ // the program with exit. Simulates power-loss. 0 disables.
+ uint32_t power_cycles;
+
+ // Optional buffer for RAM block device.
+ void *buffer;
+
+ // Optional buffer for wear
+ void *wear_buffer;
+};
+
+// testbd state
+typedef struct lfs_testbd {
+ union {
+ struct {
+ lfs_filebd_t bd;
+ struct lfs_filebd_config cfg;
+ } file;
+ struct {
+ lfs_rambd_t bd;
+ struct lfs_rambd_config cfg;
+ } ram;
+ } u;
+
+ bool persist;
+ uint32_t power_cycles;
+ lfs_testbd_wear_t *wear;
+
+ const struct lfs_testbd_config *cfg;
+} lfs_testbd_t;
+
+
+/// Block device API ///
+
+// Create a test block device using the geometry in lfs_config
+//
+// Note that filebd is used if a path is provided, if path is NULL
+// testbd will use rambd which can be much faster.
+int lfs_testbd_create(const struct lfs_config *cfg, const char *path);
+int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
+ const struct lfs_testbd_config *bdcfg);
+
+// Clean up memory associated with block device
+int lfs_testbd_destroy(const struct lfs_config *cfg);
+
+// Read a block
+int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+// Program a block
+//
+// The block must have previously been erased.
+int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+// Erase a block
+//
+// A block must be erased before being programmed. The
+// state of an erased block is undefined.
+int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block);
+
+// Sync the block device
+int lfs_testbd_sync(const struct lfs_config *cfg);
+
+
+/// Additional extended API for driving test features ///
+
+// Get simulated wear on a given block
+lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
+ lfs_block_t block);
+
+// Manually set simulated wear on a given block
+int lfs_testbd_setwear(const struct lfs_config *cfg,
+ lfs_block_t block, lfs_testbd_wear_t wear);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/components/fs/littlefs/littlefs/lfs.c b/components/fs/littlefs/littlefs/lfs.c
new file mode 100644
index 00000000..26280fa8
--- /dev/null
+++ b/components/fs/littlefs/littlefs/lfs.c
@@ -0,0 +1,5817 @@
+/*
+ * The little filesystem
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "lfs.h"
+#include "lfs_util.h"
+
+
+// some constants used throughout the code
+#define LFS_BLOCK_NULL ((lfs_block_t)-1)
+#define LFS_BLOCK_INLINE ((lfs_block_t)-2)
+
+enum {
+ LFS_OK_RELOCATED = 1,
+ LFS_OK_DROPPED = 2,
+ LFS_OK_ORPHANED = 3,
+};
+
+enum {
+ LFS_CMP_EQ = 0,
+ LFS_CMP_LT = 1,
+ LFS_CMP_GT = 2,
+};
+
+
+/// Caching block device operations ///
+
+static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
+ // do not zero, cheaper if cache is readonly or only going to be
+ // written with identical data (during relocates)
+ (void)lfs;
+ rcache->block = LFS_BLOCK_NULL;
+}
+
+static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
+ // zero to avoid information leak
+ memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
+ pcache->block = LFS_BLOCK_NULL;
+}
+
+static int lfs_bd_read(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off,
+ void *buffer, lfs_size_t size) {
+ uint8_t *data = buffer;
+ if (block >= lfs->cfg->block_count ||
+ off+size > lfs->cfg->block_size) {
+ return LFS_ERR_CORRUPT;
+ }
+
+ while (size > 0) {
+ lfs_size_t diff = size;
+
+ if (pcache && block == pcache->block &&
+ off < pcache->off + pcache->size) {
+ if (off >= pcache->off) {
+ // is already in pcache?
+ diff = lfs_min(diff, pcache->size - (off-pcache->off));
+ memcpy(data, &pcache->buffer[off-pcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // pcache takes priority
+ diff = lfs_min(diff, pcache->off-off);
+ }
+
+ if (block == rcache->block &&
+ off < rcache->off + rcache->size) {
+ if (off >= rcache->off) {
+ // is already in rcache?
+ diff = lfs_min(diff, rcache->size - (off-rcache->off));
+ memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // rcache takes priority
+ diff = lfs_min(diff, rcache->off-off);
+ }
+
+ if (size >= hint && off % lfs->cfg->read_size == 0 &&
+ size >= lfs->cfg->read_size) {
+ // bypass cache?
+ diff = lfs_aligndown(diff, lfs->cfg->read_size);
+ int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
+ if (err) {
+ return err;
+ }
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // load to cache, first condition can no longer fail
+ LFS_ASSERT(block < lfs->cfg->block_count);
+ rcache->block = block;
+ rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+ rcache->size = lfs_min(
+ lfs_min(
+ lfs_alignup(off+hint, lfs->cfg->read_size),
+ lfs->cfg->block_size)
+ - rcache->off,
+ lfs->cfg->cache_size);
+ int err = lfs->cfg->read(lfs->cfg, rcache->block,
+ rcache->off, rcache->buffer, rcache->size);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int lfs_bd_cmp(lfs_t *lfs,
+ const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+ lfs_block_t block, lfs_off_t off,
+ const void *buffer, lfs_size_t size) {
+ const uint8_t *data = buffer;
+ lfs_size_t diff = 0;
+
+ for (lfs_off_t i = 0; i < size; i += diff) {
+ uint8_t dat[8];
+
+ diff = lfs_min(size-i, sizeof(dat));
+ int res = lfs_bd_read(lfs,
+ pcache, rcache, hint-i,
+ block, off+i, &dat, diff);
+ if (res) {
+ return res;
+ }
+
+ res = memcmp(dat, data + i, diff);
+ if (res) {
+ return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
+ }
+ }
+
+ return LFS_CMP_EQ;
+}
+
+#ifndef LFS_READONLY
+static int lfs_bd_flush(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
+ if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
+ LFS_ASSERT(pcache->block < lfs->cfg->block_count);
+ lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
+ int err = lfs->cfg->prog(lfs->cfg, pcache->block,
+ pcache->off, pcache->buffer, diff);
+ LFS_ASSERT(err <= 0);
+ if (err) {
+ return err;
+ }
+
+ if (validate) {
+ // check data on disk
+ lfs_cache_drop(lfs, rcache);
+ int res = lfs_bd_cmp(lfs,
+ NULL, rcache, diff,
+ pcache->block, pcache->off, pcache->buffer, diff);
+ if (res < 0) {
+ return res;
+ }
+
+ if (res != LFS_CMP_EQ) {
+ return LFS_ERR_CORRUPT;
+ }
+ }
+
+ lfs_cache_zero(lfs, pcache);
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_sync(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
+ lfs_cache_drop(lfs, rcache);
+
+ int err = lfs_bd_flush(lfs, pcache, rcache, validate);
+ if (err) {
+ return err;
+ }
+
+ err = lfs->cfg->sync(lfs->cfg);
+ LFS_ASSERT(err <= 0);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_prog(lfs_t *lfs,
+ lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
+ lfs_block_t block, lfs_off_t off,
+ const void *buffer, lfs_size_t size) {
+ const uint8_t *data = buffer;
+ LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
+ LFS_ASSERT(off + size <= lfs->cfg->block_size);
+
+ while (size > 0) {
+ if (block == pcache->block &&
+ off >= pcache->off &&
+ off < pcache->off + lfs->cfg->cache_size) {
+ // already fits in pcache?
+ lfs_size_t diff = lfs_min(size,
+ lfs->cfg->cache_size - (off-pcache->off));
+ memcpy(&pcache->buffer[off-pcache->off], data, diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+
+ pcache->size = lfs_max(pcache->size, off - pcache->off);
+ if (pcache->size == lfs->cfg->cache_size) {
+ // eagerly flush out pcache if we fill up
+ int err = lfs_bd_flush(lfs, pcache, rcache, validate);
+ if (err) {
+ return err;
+ }
+ }
+
+ continue;
+ }
+
+ // pcache must have been flushed, either by programming and
+ // entire block or manually flushing the pcache
+ LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
+
+ // prepare pcache, first condition can no longer fail
+ pcache->block = block;
+ pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
+ pcache->size = 0;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
+ LFS_ASSERT(block < lfs->cfg->block_count);
+ int err = lfs->cfg->erase(lfs->cfg, block);
+ LFS_ASSERT(err <= 0);
+ return err;
+}
+#endif
+
+
+/// Small type-level utilities ///
+// operations on block pairs
+static inline void lfs_pair_swap(lfs_block_t pair[2]) {
+ lfs_block_t t = pair[0];
+ pair[0] = pair[1];
+ pair[1] = t;
+}
+
+static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
+ return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
+}
+
+static inline int lfs_pair_cmp(
+ const lfs_block_t paira[2],
+ const lfs_block_t pairb[2]) {
+ return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
+ paira[0] == pairb[1] || paira[1] == pairb[0]);
+}
+
+#ifndef LFS_READONLY
+static inline bool lfs_pair_sync(
+ const lfs_block_t paira[2],
+ const lfs_block_t pairb[2]) {
+ return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
+ (paira[0] == pairb[1] && paira[1] == pairb[0]);
+}
+#endif
+
+static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
+ pair[0] = lfs_fromle32(pair[0]);
+ pair[1] = lfs_fromle32(pair[1]);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
+ pair[0] = lfs_tole32(pair[0]);
+ pair[1] = lfs_tole32(pair[1]);
+}
+#endif
+
+// operations on 32-bit entry tags
+typedef uint32_t lfs_tag_t;
+typedef int32_t lfs_stag_t;
+
+#define LFS_MKTAG(type, id, size) \
+ (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
+
+#define LFS_MKTAG_IF(cond, type, id, size) \
+ ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
+
+#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
+ ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
+
+static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
+ return !(tag & 0x80000000);
+}
+
+static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
+ return ((int32_t)(tag << 22) >> 22) == -1;
+}
+
+static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
+ return (tag & 0x70000000) >> 20;
+}
+
+static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
+ return (tag & 0x7ff00000) >> 20;
+}
+
+static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
+ return (tag & 0x0ff00000) >> 20;
+}
+
+static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
+ return (int8_t)lfs_tag_chunk(tag);
+}
+
+static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
+ return (tag & 0x000ffc00) >> 10;
+}
+
+static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
+ return tag & 0x000003ff;
+}
+
+static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
+ return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
+}
+
+// operations on attributes in attribute lists
+struct lfs_mattr {
+ lfs_tag_t tag;
+ const void *buffer;
+};
+
+struct lfs_diskoff {
+ lfs_block_t block;
+ lfs_off_t off;
+};
+
+#define LFS_MKATTRS(...) \
+ (struct lfs_mattr[]){__VA_ARGS__}, \
+ sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
+
+// operations on global state
+static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
+ for (int i = 0; i < 3; i++) {
+ ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
+ }
+}
+
+static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
+ for (int i = 0; i < 3; i++) {
+ if (((uint32_t*)a)[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#ifndef LFS_READONLY
+static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag);
+}
+
+static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
+ return lfs_tag_size(a->tag);
+}
+
+static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
+ return lfs_tag_type1(a->tag);
+}
+#endif
+
+static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
+ const lfs_block_t *pair) {
+ return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
+}
+
+static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
+ a->tag = lfs_fromle32(a->tag);
+ a->pair[0] = lfs_fromle32(a->pair[0]);
+ a->pair[1] = lfs_fromle32(a->pair[1]);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
+ a->tag = lfs_tole32(a->tag);
+ a->pair[0] = lfs_tole32(a->pair[0]);
+ a->pair[1] = lfs_tole32(a->pair[1]);
+}
+#endif
+
+// other endianness operations
+static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
+ ctz->head = lfs_fromle32(ctz->head);
+ ctz->size = lfs_fromle32(ctz->size);
+}
+
+#ifndef LFS_READONLY
+static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
+ ctz->head = lfs_tole32(ctz->head);
+ ctz->size = lfs_tole32(ctz->size);
+}
+#endif
+
+static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
+ superblock->version = lfs_fromle32(superblock->version);
+ superblock->block_size = lfs_fromle32(superblock->block_size);
+ superblock->block_count = lfs_fromle32(superblock->block_count);
+ superblock->name_max = lfs_fromle32(superblock->name_max);
+ superblock->file_max = lfs_fromle32(superblock->file_max);
+ superblock->attr_max = lfs_fromle32(superblock->attr_max);
+}
+
+#ifndef LFS_READONLY
+static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
+ superblock->version = lfs_tole32(superblock->version);
+ superblock->block_size = lfs_tole32(superblock->block_size);
+ superblock->block_count = lfs_tole32(superblock->block_count);
+ superblock->name_max = lfs_tole32(superblock->name_max);
+ superblock->file_max = lfs_tole32(superblock->file_max);
+ superblock->attr_max = lfs_tole32(superblock->attr_max);
+}
+#endif
+
+#ifndef LFS_NO_ASSERT
+static bool lfs_mlist_isopen(struct lfs_mlist *head,
+ struct lfs_mlist *node) {
+ for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
+ if (*p == (struct lfs_mlist*)node) {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif
+
+static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
+ for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
+ if (*p == mlist) {
+ *p = (*p)->next;
+ break;
+ }
+ }
+}
+
+static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
+ mlist->next = lfs->mlist;
+ lfs->mlist = mlist;
+}
+
+
+/// Internal operations predeclared here ///
+#ifndef LFS_READONLY
+static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount);
+static int lfs_dir_compact(lfs_t *lfs,
+ lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+ lfs_mdir_t *source, uint16_t begin, uint16_t end);
+static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
+static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
+static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
+
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
+static void lfs_fs_prepmove(lfs_t *lfs,
+ uint16_t id, const lfs_block_t pair[2]);
+static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
+ lfs_mdir_t *pdir);
+static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
+ lfs_mdir_t *parent);
+static int lfs_fs_forceconsistency(lfs_t *lfs);
+#endif
+
+#ifdef LFS_MIGRATE
+static int lfs1_traverse(lfs_t *lfs,
+ int (*cb)(void*, lfs_block_t), void *data);
+#endif
+
+static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
+
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
+static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);
+
+static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
+static int lfs_fs_rawtraverse(lfs_t *lfs,
+ int (*cb)(void *data, lfs_block_t block), void *data,
+ bool includeorphans);
+
+static int lfs_deinit(lfs_t *lfs);
+static int lfs_rawunmount(lfs_t *lfs);
+
+
+/// Block allocator ///
+#ifndef LFS_READONLY
+static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
+ lfs_t *lfs = (lfs_t*)p;
+ lfs_block_t off = ((block - lfs->free.off)
+ + lfs->cfg->block_count) % lfs->cfg->block_count;
+
+ if (off < lfs->free.size) {
+ lfs->free.buffer[off / 32] |= 1U << (off % 32);
+ }
+
+ return 0;
+}
+#endif
+
+// indicate allocated blocks have been committed into the filesystem, this
+// is to prevent blocks from being garbage collected in the middle of a
+// commit operation
+static void lfs_alloc_ack(lfs_t *lfs) {
+ lfs->free.ack = lfs->cfg->block_count;
+}
+
+// drop the lookahead buffer, this is done during mounting and failed
+// traversals in order to avoid invalid lookahead state
+static void lfs_alloc_drop(lfs_t *lfs) {
+ lfs->free.size = 0;
+ lfs->free.i = 0;
+ lfs_alloc_ack(lfs);
+}
+
+#ifndef LFS_READONLY
+static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
+ while (true) {
+ while (lfs->free.i != lfs->free.size) {
+ lfs_block_t off = lfs->free.i;
+ lfs->free.i += 1;
+ lfs->free.ack -= 1;
+
+ if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
+ // found a free block
+ *block = (lfs->free.off + off) % lfs->cfg->block_count;
+
+ // eagerly find next off so an alloc ack can
+ // discredit old lookahead blocks
+ while (lfs->free.i != lfs->free.size &&
+ (lfs->free.buffer[lfs->free.i / 32]
+ & (1U << (lfs->free.i % 32)))) {
+ lfs->free.i += 1;
+ lfs->free.ack -= 1;
+ }
+
+ return 0;
+ }
+ }
+
+ // check if we have looked at all blocks since last ack
+ if (lfs->free.ack == 0) {
+ LFS_ERROR("No more free space %"PRIu32,
+ lfs->free.i + lfs->free.off);
+ return LFS_ERR_NOSPC;
+ }
+
+ lfs->free.off = (lfs->free.off + lfs->free.size)
+ % lfs->cfg->block_count;
+ lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
+ lfs->free.i = 0;
+
+ // find mask of free blocks from tree
+ memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
+ int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
+ if (err) {
+ lfs_alloc_drop(lfs);
+ return err;
+ }
+ }
+}
+#endif
+
+/// Metadata pair and directory operations ///
+static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
+ lfs_tag_t gmask, lfs_tag_t gtag,
+ lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
+ lfs_off_t off = dir->off;
+ lfs_tag_t ntag = dir->etag;
+ lfs_stag_t gdiff = 0;
+
+ if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
+ lfs_tag_id(gmask) != 0 &&
+ lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
+ // synthetic moves
+ gdiff -= LFS_MKTAG(0, 1, 0);
+ }
+
+ // iterate over dir block backwards (for faster lookups)
+ while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
+ off -= lfs_tag_dsize(ntag);
+ lfs_tag_t tag = ntag;
+ int err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, sizeof(ntag),
+ dir->pair[0], off, &ntag, sizeof(ntag));
+ if (err) {
+ return err;
+ }
+
+ ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
+
+ if (lfs_tag_id(gmask) != 0 &&
+ lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+ lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
+ if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
+ (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
+ // found where we were created
+ return LFS_ERR_NOENT;
+ }
+
+ // move around splices
+ gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+ }
+
+ if ((gmask & tag) == (gmask & (gtag - gdiff))) {
+ if (lfs_tag_isdelete(tag)) {
+ return LFS_ERR_NOENT;
+ }
+
+ lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, diff,
+ dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
+ if (err) {
+ return err;
+ }
+
+ memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
+
+ return tag + gdiff;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+
+static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
+ lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
+ return lfs_dir_getslice(lfs, dir,
+ gmask, gtag,
+ 0, buffer, lfs_tag_size(gtag));
+}
+
+// Read a slice of an attribute's contents through the cache layers.
+//
+// Looks up the tag matched by gmask/gtag in dir and copies size bytes
+// starting at off into buffer, consulting pcache/rcache first the same
+// way lfs_bd_read does (inline-attribute reads use the special block id
+// LFS_BLOCK_INLINE). hint sizes the cache fill for expected upcoming
+// reads. Returns 0, or a negative error code from lfs_dir_getslice.
+static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
+        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+        lfs_tag_t gmask, lfs_tag_t gtag,
+        lfs_off_t off, void *buffer, lfs_size_t size) {
+    uint8_t *data = buffer;
+    // attribute contents can never extend past a block
+    if (off+size > lfs->cfg->block_size) {
+        return LFS_ERR_CORRUPT;
+    }
+
+    while (size > 0) {
+        lfs_size_t diff = size;
+
+        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
+                off < pcache->off + pcache->size) {
+            if (off >= pcache->off) {
+                // is already in pcache?
+                diff = lfs_min(diff, pcache->size - (off-pcache->off));
+                memcpy(data, &pcache->buffer[off-pcache->off], diff);
+
+                data += diff;
+                off += diff;
+                size -= diff;
+                continue;
+            }
+
+            // pcache takes priority
+            diff = lfs_min(diff, pcache->off-off);
+        }
+
+        if (rcache->block == LFS_BLOCK_INLINE &&
+                off < rcache->off + rcache->size) {
+            if (off >= rcache->off) {
+                // is already in rcache?
+                diff = lfs_min(diff, rcache->size - (off-rcache->off));
+                memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+                data += diff;
+                off += diff;
+                size -= diff;
+                continue;
+            }
+
+            // rcache takes priority
+            diff = lfs_min(diff, rcache->off-off);
+        }
+
+        // load to cache, first condition can no longer fail
+        rcache->block = LFS_BLOCK_INLINE;
+        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
+                lfs->cfg->cache_size);
+        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
+                rcache->off, rcache->buffer, rcache->size);
+        if (err < 0) {
+            return err;
+        }
+    }
+
+    return 0;
+}
+
+#ifndef LFS_READONLY
+// Traverse callback used to filter out outdated tags.
+//
+// *filtertag (passed via p) is the candidate tag being filtered. Returns
+// true and rewrites *filtertag to a noop when tag supersedes or deletes
+// the candidate; otherwise the candidate's id is adjusted for any
+// create/delete (splice) tags seen, and false is returned.
+static int lfs_dir_traverse_filter(void *p,
+        lfs_tag_t tag, const void *buffer) {
+    lfs_tag_t *filtertag = p;
+    (void)buffer;
+
+    // which mask depends on unique bit in tag structure
+    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
+            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
+            : LFS_MKTAG(0x700, 0x3ff, 0);
+
+    // check for redundancy
+    if ((mask & tag) == (mask & *filtertag) ||
+            lfs_tag_isdelete(*filtertag) ||
+            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
+                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
+                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
+        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
+        return true;
+    }
+
+    // check if we need to adjust for created/deleted tags
+    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
+        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+    }
+
+    return false;
+}
+#endif
+
+#ifndef LFS_READONLY
+// maximum recursive depth of lfs_dir_traverse, the deepest call:
+//
+// traverse with commit
+// '-> traverse with move
+// '-> traverse with filter
+//
+#define LFS_DIR_TRAVERSE_DEPTH 3
+
+// Saved state of one suspended lfs_dir_traverse invocation; used as a
+// stack frame in lfs_dir_traverse's explicit recursion stack.
+struct lfs_dir_traverse {
+    // traversal position: metadata block plus pending attribute list
+    const lfs_mdir_t *dir;
+    lfs_off_t off;
+    lfs_tag_t ptag;
+    const struct lfs_mattr *attrs;
+    int attrcount;
+
+    // tag filter and id range/remapping being applied
+    lfs_tag_t tmask;
+    lfs_tag_t ttag;
+    uint16_t begin;
+    uint16_t end;
+    int16_t diff;
+
+    // callback to resume with when this frame is popped
+    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
+    void *data;
+
+    // tag being processed at the moment the traversal recursed
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk;
+};
+
+// Iterate over the tags in a metadata block plus a pending attribute
+// list, invoking cb for each tag that survives filtering.
+//
+// tmask/ttag select which tags reach cb, [begin,end) bounds the ids
+// considered, and diff is added to each tag's id before the callback
+// (used to shift ids when compacting a subrange). FROM_MOVE and
+// duplicate-filtering both recurse into nested traversals; recursion is
+// bounded by LFS_DIR_TRAVERSE_DEPTH and implemented with an explicit
+// stack so tools can bound stack usage.
+static int lfs_dir_traverse(lfs_t *lfs,
+        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
+        const struct lfs_mattr *attrs, int attrcount,
+        lfs_tag_t tmask, lfs_tag_t ttag,
+        uint16_t begin, uint16_t end, int16_t diff,
+        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+    // This function is inherently recursive, but bounded. To allow tool-based
+    // analysis without unnecessary code-cost we use an explicit stack
+    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
+    unsigned sp = 0;
+    int res;
+
+    // iterate over directory and attrs
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk;
+    while (true) {
+        {
+            if (off+lfs_tag_dsize(ptag) < dir->off) {
+                off += lfs_tag_dsize(ptag);
+                int err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, sizeof(tag),
+                        dir->pair[0], off, &tag, sizeof(tag));
+                if (err) {
+                    return err;
+                }
+
+                // tags are xor-encoded against the previous tag on disk
+                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
+                disk.block = dir->pair[0];
+                disk.off = off+sizeof(lfs_tag_t);
+                buffer = &disk;
+                ptag = tag;
+            } else if (attrcount > 0) {
+                // disk exhausted, continue with the in-memory attr list
+                tag = attrs[0].tag;
+                buffer = attrs[0].buffer;
+                attrs += 1;
+                attrcount -= 1;
+            } else {
+                // finished traversal, pop from stack?
+                res = 0;
+                break;
+            }
+
+            // do we need to filter?
+            lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
+            if ((mask & tmask & tag) != (mask & tmask & ttag)) {
+                continue;
+            }
+
+            if (lfs_tag_id(tmask) != 0) {
+                LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
+                // recurse, scan for duplicates, and update tag based on
+                // creates/deletes
+                stack[sp] = (struct lfs_dir_traverse){
+                    .dir = dir,
+                    .off = off,
+                    .ptag = ptag,
+                    .attrs = attrs,
+                    .attrcount = attrcount,
+                    .tmask = tmask,
+                    .ttag = ttag,
+                    .begin = begin,
+                    .end = end,
+                    .diff = diff,
+                    .cb = cb,
+                    .data = data,
+                    .tag = tag,
+                    .buffer = buffer,
+                    .disk = disk,
+                };
+                sp += 1;
+
+                tmask = 0;
+                ttag = 0;
+                begin = 0;
+                end = 0;
+                diff = 0;
+                cb = lfs_dir_traverse_filter;
+                data = &stack[sp-1].tag;
+                continue;
+            }
+        }
+
+popped:
+        // in filter range?
+        if (lfs_tag_id(tmask) != 0 &&
+                !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
+            continue;
+        }
+
+        // handle special cases for mcu-side operations
+        if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
+            // do nothing
+        } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
+            // Without this condition, lfs_dir_traverse can exhibit an
+            // extremely expensive O(n^3) of nested loops when renaming.
+            // This happens because lfs_dir_traverse tries to filter tags by
+            // the tags in the source directory, triggering a second
+            // lfs_dir_traverse with its own filter operation.
+            //
+            // traverse with commit
+            // '-> traverse with filter
+            //     '-> traverse with move
+            //         '-> traverse with filter
+            //
+            // However we don't actually care about filtering the second set of
+            // tags, since duplicate tags have no effect when filtering.
+            //
+            // This check skips this unnecessary recursive filtering explicitly,
+            // reducing this runtime from O(n^3) to O(n^2).
+            if (cb == lfs_dir_traverse_filter) {
+                continue;
+            }
+
+            // recurse into move
+            stack[sp] = (struct lfs_dir_traverse){
+                .dir = dir,
+                .off = off,
+                .ptag = ptag,
+                .attrs = attrs,
+                .attrcount = attrcount,
+                .tmask = tmask,
+                .ttag = ttag,
+                .begin = begin,
+                .end = end,
+                .diff = diff,
+                .cb = cb,
+                .data = data,
+                // the move tag becomes a noop in the resumed frame
+                .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
+            };
+            sp += 1;
+
+            uint16_t fromid = lfs_tag_size(tag);
+            uint16_t toid = lfs_tag_id(tag);
+            dir = buffer;
+            off = 0;
+            ptag = 0xffffffff;
+            attrs = NULL;
+            attrcount = 0;
+            tmask = LFS_MKTAG(0x600, 0x3ff, 0);
+            ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
+            begin = fromid;
+            end = fromid+1;
+            diff = toid-fromid+diff;
+        } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
+            for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
+                const struct lfs_attr *a = buffer;
+                res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
+                        lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
+                if (res < 0) {
+                    return res;
+                }
+
+                if (res) {
+                    break;
+                }
+            }
+        } else {
+            res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
+            if (res < 0) {
+                return res;
+            }
+
+            if (res) {
+                break;
+            }
+        }
+    }
+
+    if (sp > 0) {
+        // pop from the stack and return, fortunately all pops share
+        // a destination
+        dir = stack[sp-1].dir;
+        off = stack[sp-1].off;
+        ptag = stack[sp-1].ptag;
+        attrs = stack[sp-1].attrs;
+        attrcount = stack[sp-1].attrcount;
+        tmask = stack[sp-1].tmask;
+        ttag = stack[sp-1].ttag;
+        begin = stack[sp-1].begin;
+        end = stack[sp-1].end;
+        diff = stack[sp-1].diff;
+        cb = stack[sp-1].cb;
+        data = stack[sp-1].data;
+        tag = stack[sp-1].tag;
+        buffer = stack[sp-1].buffer;
+        disk = stack[sp-1].disk;
+        sp -= 1;
+        goto popped;
+    } else {
+        return res;
+    }
+}
+#endif
+
+// Fetch a metadata pair from disk while simultaneously searching for a
+// tag matched by fmask/ftag.
+//
+// Both blocks in pair are tried, most recent revision first. For every
+// candidate tag cb is called so the caller can compare contents against
+// disk (see lfs_dir_find_match). Returns the best matching tag,
+// LFS_ERR_NOENT if the match was deleted, 0 if no match was found, or a
+// negative error code; on success dir describes the fetched block. id,
+// if non-NULL, receives the id of the best match (capped at dir->count).
+static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
+        lfs_mdir_t *dir, const lfs_block_t pair[2],
+        lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
+        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+    // we can find tag very efficiently during a fetch, since we're already
+    // scanning the entire directory
+    lfs_stag_t besttag = -1;
+
+    // if either block address is invalid we return LFS_ERR_CORRUPT here,
+    // otherwise later writes to the pair could fail
+    if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
+        return LFS_ERR_CORRUPT;
+    }
+
+    // find the block with the most recent revision
+    uint32_t revs[2] = {0, 0};
+    int r = 0;
+    for (int i = 0; i < 2; i++) {
+        int err = lfs_bd_read(lfs,
+                NULL, &lfs->rcache, sizeof(revs[i]),
+                pair[i], 0, &revs[i], sizeof(revs[i]));
+        revs[i] = lfs_fromle32(revs[i]);
+        if (err && err != LFS_ERR_CORRUPT) {
+            return err;
+        }
+
+        if (err != LFS_ERR_CORRUPT &&
+                lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
+            r = i;
+        }
+    }
+
+    dir->pair[0] = pair[(r+0)%2];
+    dir->pair[1] = pair[(r+1)%2];
+    dir->rev = revs[(r+0)%2];
+    dir->off = 0; // nonzero = found some commits
+
+    // now scan tags to fetch the actual dir and find possible match
+    for (int i = 0; i < 2; i++) {
+        lfs_off_t off = 0;
+        lfs_tag_t ptag = 0xffffffff;
+
+        // temp* state is only committed to dir at each valid crc tag,
+        // so a torn commit never leaks into the result
+        uint16_t tempcount = 0;
+        lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+        bool tempsplit = false;
+        lfs_stag_t tempbesttag = besttag;
+
+        // assume not erased until proven otherwise
+        dir->rev = lfs_tole32(dir->rev);
+        uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
+        dir->rev = lfs_fromle32(dir->rev);
+
+        while (true) {
+            // extract next tag
+            lfs_tag_t tag;
+            off += lfs_tag_dsize(ptag);
+            int err = lfs_bd_read(lfs,
+                    NULL, &lfs->rcache, lfs->cfg->block_size,
+                    dir->pair[0], off, &tag, sizeof(tag));
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    // can't continue?
+                    dir->erased = false;
+                    break;
+                }
+                return err;
+            }
+
+            crc = lfs_crc(crc, &tag, sizeof(tag));
+            tag = lfs_frombe32(tag) ^ ptag;
+
+            // next commit not yet programmed or we're not in valid range
+            if (!lfs_tag_isvalid(tag)) {
+                dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
+                        dir->off % lfs->cfg->prog_size == 0);
+                break;
+            } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
+                dir->erased = false;
+                break;
+            }
+
+            ptag = tag;
+
+            if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
+                // check the crc attr
+                uint32_t dcrc;
+                err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, lfs->cfg->block_size,
+                        dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        dir->erased = false;
+                        break;
+                    }
+                    return err;
+                }
+                dcrc = lfs_fromle32(dcrc);
+
+                if (crc != dcrc) {
+                    dir->erased = false;
+                    break;
+                }
+
+                // reset the next bit if we need to
+                ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
+
+                // toss our crc into the filesystem seed for
+                // pseudorandom numbers, note we use another crc here
+                // as a collection function because it is sufficiently
+                // random and convenient
+                lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
+
+                // update with what's found so far
+                besttag = tempbesttag;
+                dir->off = off + lfs_tag_dsize(tag);
+                dir->etag = ptag;
+                dir->count = tempcount;
+                dir->tail[0] = temptail[0];
+                dir->tail[1] = temptail[1];
+                dir->split = tempsplit;
+
+                // reset crc
+                crc = 0xffffffff;
+                continue;
+            }
+
+            // crc the entry first, hopefully leaving it in the cache
+            for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
+                uint8_t dat;
+                err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, lfs->cfg->block_size,
+                        dir->pair[0], off+j, &dat, 1);
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        dir->erased = false;
+                        break;
+                    }
+                    return err;
+                }
+
+                crc = lfs_crc(crc, &dat, 1);
+            }
+
+            // directory modification tags?
+            if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
+                // increase count of files if necessary
+                if (lfs_tag_id(tag) >= tempcount) {
+                    tempcount = lfs_tag_id(tag) + 1;
+                }
+            } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
+                tempcount += lfs_tag_splice(tag);
+
+                if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
+                        (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
+                    tempbesttag |= 0x80000000;
+                } else if (tempbesttag != -1 &&
+                        lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
+                    tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+                }
+            } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
+                tempsplit = (lfs_tag_chunk(tag) & 1);
+
+                err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, lfs->cfg->block_size,
+                        dir->pair[0], off+sizeof(tag), &temptail, 8);
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        dir->erased = false;
+                        break;
+                    }
+                    return err;
+                }
+                lfs_pair_fromle32(temptail);
+            }
+
+            // found a match for our fetcher?
+            if ((fmask & tag) == (fmask & ftag)) {
+                int res = cb(data, tag, &(struct lfs_diskoff){
+                        dir->pair[0], off+sizeof(tag)});
+                if (res < 0) {
+                    if (res == LFS_ERR_CORRUPT) {
+                        dir->erased = false;
+                        break;
+                    }
+                    return res;
+                }
+
+                if (res == LFS_CMP_EQ) {
+                    // found a match
+                    tempbesttag = tag;
+                } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
+                        (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
+                    // found an identical tag, but contents didn't match
+                    // this must mean that our besttag has been overwritten
+                    tempbesttag = -1;
+                } else if (res == LFS_CMP_GT &&
+                        lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
+                    // found a greater match, keep track to keep things sorted
+                    tempbesttag = tag | 0x80000000;
+                }
+            }
+        }
+
+        // consider what we have good enough
+        if (dir->off > 0) {
+            // synthetic move
+            if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
+                if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
+                    besttag |= 0x80000000;
+                } else if (besttag != -1 &&
+                        lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
+                    besttag -= LFS_MKTAG(0, 1, 0);
+                }
+            }
+
+            // found tag? or found best id?
+            if (id) {
+                *id = lfs_min(lfs_tag_id(besttag), dir->count);
+            }
+
+            if (lfs_tag_isvalid(besttag)) {
+                return besttag;
+            } else if (lfs_tag_id(besttag) < dir->count) {
+                return LFS_ERR_NOENT;
+            } else {
+                return 0;
+            }
+        }
+
+        // failed, try the other block?
+        lfs_pair_swap(dir->pair);
+        dir->rev = revs[(r+1)%2];
+    }
+
+    LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
+            dir->pair[0], dir->pair[1]);
+    return LFS_ERR_CORRUPT;
+}
+
+// Fetch a metadata pair without searching for any tag.
+static int lfs_dir_fetch(lfs_t *lfs,
+        lfs_mdir_t *dir, const lfs_block_t pair[2]) {
+    // an all-ones mask/tag has the invalid bit set, so it can never
+    // match an on-disk tag; this reduces fetchmatch to a plain fetch
+    const lfs_tag_t nomatch = (lfs_tag_t)-1;
+    lfs_stag_t res = lfs_dir_fetchmatch(lfs, dir, pair,
+            nomatch, nomatch, NULL, NULL, NULL);
+    return (int)res;
+}
+
+// Fold any gstate (movestate tag) stored in dir into *gstate via xor.
+// A missing movestate tag is not an error, the mdir simply carries none.
+static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
+        lfs_gstate_t *gstate) {
+    lfs_gstate_t temp;
+    lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
+            LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
+    if (res == LFS_ERR_NOENT) {
+        // no gstate in this mdir, nothing to fold in
+        return 0;
+    }
+    if (res < 0) {
+        return res;
+    }
+
+    // xor together to find resulting gstate
+    lfs_gstate_fromle32(&temp);
+    lfs_gstate_xor(gstate, &temp);
+    return 0;
+}
+
+// Populate info with the name, type, and size of entry id in dir.
+static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
+        uint16_t id, struct lfs_info *info) {
+    // the root dir has no name tag, handle it specially
+    if (id == 0x3ff) {
+        strcpy(info->name, "/");
+        info->type = LFS_TYPE_DIR;
+        return 0;
+    }
+
+    // fetch the name tag, its type3 doubles as the entry type
+    lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
+            LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
+    if (tag < 0) {
+        return (int)tag;
+    }
+    info->type = lfs_tag_type3(tag);
+
+    // fetch the struct tag to determine the file size
+    struct lfs_ctz ctz;
+    tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
+            LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
+    if (tag < 0) {
+        return (int)tag;
+    }
+    lfs_ctz_fromle32(&ctz);
+
+    switch (lfs_tag_type3(tag)) {
+    case LFS_TYPE_CTZSTRUCT:
+        // ctz-list file, size lives in the ctz structure
+        info->size = ctz.size;
+        break;
+    case LFS_TYPE_INLINESTRUCT:
+        // inline file, size is the size of the tag itself
+        info->size = lfs_tag_size(tag);
+        break;
+    }
+
+    return 0;
+}
+
+// Context for the lfs_dir_find_match callback.
+struct lfs_dir_find_match {
+    lfs_t *lfs;
+    const void *name;   // name being searched for
+    lfs_size_t size;    // length of name in bytes
+};
+
+// Fetchmatch callback comparing an on-disk name tag against the name we
+// are searching for; returns an LFS_CMP_* ordering or a negative error.
+static int lfs_dir_find_match(void *data,
+        lfs_tag_t tag, const void *buffer) {
+    struct lfs_dir_find_match *name = data;
+    lfs_t *lfs = name->lfs;
+    const struct lfs_diskoff *disk = buffer;
+
+    // compare the common prefix with what's on disk
+    lfs_size_t tagsize = lfs_tag_size(tag);
+    lfs_size_t prefix = lfs_min(name->size, tagsize);
+    int res = lfs_bd_cmp(lfs,
+            NULL, &lfs->rcache, prefix,
+            disk->block, disk->off, name->name, prefix);
+    if (res != LFS_CMP_EQ) {
+        return res;
+    }
+
+    // prefixes match, so ordering falls back to length
+    if (name->size < tagsize) {
+        return LFS_CMP_LT;
+    } else if (name->size > tagsize) {
+        return LFS_CMP_GT;
+    }
+
+    // same length, same bytes, a true match!
+    return LFS_CMP_EQ;
+}
+
+// Walk path from the root, fetching each directory along the way.
+//
+// On success returns the tag of the final entry and leaves dir as the
+// metadata block containing it; *path is advanced to the final name
+// component. id, if non-NULL, receives the entry's id. '.' and
+// name-cancelling '..' components are resolved textually before any
+// disk access.
+static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
+        const char **path, uint16_t *id) {
+    // we reduce path to a single name if we can find it
+    const char *name = *path;
+    if (id) {
+        *id = 0x3ff;
+    }
+
+    // default to root dir
+    lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
+    dir->tail[0] = lfs->root[0];
+    dir->tail[1] = lfs->root[1];
+
+    while (true) {
+nextname:
+        // skip slashes
+        name += strspn(name, "/");
+        lfs_size_t namelen = strcspn(name, "/");
+
+        // skip '.' and root '..'
+        if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
+            (namelen == 2 && memcmp(name, "..", 2) == 0)) {
+            name += namelen;
+            goto nextname;
+        }
+
+        // skip if matched by '..' in name
+        const char *suffix = name + namelen;
+        lfs_size_t sufflen;
+        int depth = 1;
+        while (true) {
+            suffix += strspn(suffix, "/");
+            sufflen = strcspn(suffix, "/");
+            if (sufflen == 0) {
+                break;
+            }
+
+            if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
+                depth -= 1;
+                if (depth == 0) {
+                    // a later '..' cancels this name, skip past it
+                    name = suffix + sufflen;
+                    goto nextname;
+                }
+            } else {
+                depth += 1;
+            }
+
+            suffix += sufflen;
+        }
+
+        // found path
+        if (name[0] == '\0') {
+            return tag;
+        }
+
+        // update what we've found so far
+        *path = name;
+
+        // only continue if we hit a directory
+        if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
+            return LFS_ERR_NOTDIR;
+        }
+
+        // grab the entry data
+        if (lfs_tag_id(tag) != 0x3ff) {
+            lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
+                    LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
+            if (res < 0) {
+                return res;
+            }
+            lfs_pair_fromle32(dir->tail);
+        }
+
+        // find entry matching name
+        while (true) {
+            tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
+                    LFS_MKTAG(0x780, 0, 0),
+                    LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
+                    // are we last name?
+                    (strchr(name, '/') == NULL) ? id : NULL,
+                    lfs_dir_find_match, &(struct lfs_dir_find_match){
+                        lfs, name, namelen});
+            if (tag < 0) {
+                return tag;
+            }
+
+            if (tag) {
+                break;
+            }
+
+            // not in this block, follow the tail chain if split
+            if (!dir->split) {
+                return LFS_ERR_NOENT;
+            }
+        }
+
+        // to next name
+        name += namelen;
+    }
+}
+
+// commit logic
+//
+// In-progress commit to a metadata block.
+struct lfs_commit {
+    lfs_block_t block;  // block being programmed
+    lfs_off_t off;      // current program offset in the block
+    lfs_tag_t ptag;     // previous tag, used to xor-encode the next tag
+    uint32_t crc;       // running crc of the current commit
+
+    lfs_off_t begin;    // offsets bounding where this commit may write
+    lfs_off_t end;
+};
+
+#ifndef LFS_READONLY
+// Program size bytes from buffer into the commit, advancing the commit
+// offset and folding the bytes into the commit's running crc.
+static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
+        const void *buffer, lfs_size_t size) {
+    const uint8_t *data = buffer;
+    int err = lfs_bd_prog(lfs,
+            &lfs->pcache, &lfs->rcache, false,
+            commit->block, commit->off, data, size);
+    if (err) {
+        return err;
+    }
+
+    // only account for the bytes once they programmed cleanly
+    commit->crc = lfs_crc(commit->crc, buffer, size);
+    commit->off += size;
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Append a single tag + its contents to an in-progress commit.
+//
+// The tag is xored against the previous tag and written big-endian.
+// buffer points at the attribute contents in memory, or, if the tag's
+// high bit is set, at a struct lfs_diskoff naming where to copy the
+// contents from disk. Returns LFS_ERR_NOSPC when the commit is full.
+static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
+        lfs_tag_t tag, const void *buffer) {
+    // check if we fit
+    lfs_size_t dsize = lfs_tag_dsize(tag);
+    if (commit->off + dsize > commit->end) {
+        return LFS_ERR_NOSPC;
+    }
+
+    // write out tag
+    lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
+    int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
+    if (err) {
+        return err;
+    }
+
+    if (!(tag & 0x80000000)) {
+        // from memory
+        err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
+        if (err) {
+            return err;
+        }
+    } else {
+        // from disk
+        const struct lfs_diskoff *disk = buffer;
+        for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
+            // rely on caching to make this efficient
+            uint8_t dat;
+            err = lfs_bd_read(lfs,
+                    NULL, &lfs->rcache, dsize-sizeof(tag)-i,
+                    disk->block, disk->off+i, &dat, 1);
+            if (err) {
+                return err;
+            }
+
+            err = lfs_dir_commitprog(lfs, commit, &dat, 1);
+            if (err) {
+                return err;
+            }
+        }
+    }
+
+    // remember this tag for xor-encoding the next one
+    commit->ptag = tag & 0x7fffffff;
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Finish a commit: pad to the next prog boundary with crc tags, flush
+// the caches, and read everything back to verify it hit the disk.
+static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
+    // align to program units
+    const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
+            lfs->cfg->prog_size);
+
+    // offset/crc of the first (non-padding) crc, saved for verification
+    lfs_off_t off1 = 0;
+    uint32_t crc1 = 0;
+
+    // create crc tags to fill up remainder of commit, note that
+    // padding is not crced, which lets fetches skip padding but
+    // makes committing a bit more complicated
+    while (commit->off < end) {
+        lfs_off_t off = commit->off + sizeof(lfs_tag_t);
+        lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
+        if (noff < end) {
+            noff = lfs_min(noff, end - 2*sizeof(uint32_t));
+        }
+
+        // read erased state from next program unit
+        lfs_tag_t tag = 0xffffffff;
+        int err = lfs_bd_read(lfs,
+                NULL, &lfs->rcache, sizeof(tag),
+                commit->block, noff, &tag, sizeof(tag));
+        if (err && err != LFS_ERR_CORRUPT) {
+            return err;
+        }
+
+        // build crc tag
+        bool reset = ~lfs_frombe32(tag) >> 31;
+        tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);
+
+        // write out crc
+        uint32_t footer[2];
+        footer[0] = lfs_tobe32(tag ^ commit->ptag);
+        commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
+        footer[1] = lfs_tole32(commit->crc);
+        err = lfs_bd_prog(lfs,
+                &lfs->pcache, &lfs->rcache, false,
+                commit->block, commit->off, &footer, sizeof(footer));
+        if (err) {
+            return err;
+        }
+
+        // keep track of non-padding checksum to verify
+        if (off1 == 0) {
+            off1 = commit->off + sizeof(uint32_t);
+            crc1 = commit->crc;
+        }
+
+        commit->off += sizeof(tag)+lfs_tag_size(tag);
+        commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
+        commit->crc = 0xffffffff; // reset crc for next "commit"
+    }
+
+    // flush buffers
+    int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
+    if (err) {
+        return err;
+    }
+
+    // successful commit, check checksums to make sure
+    lfs_off_t off = commit->begin;
+    lfs_off_t noff = off1;
+    while (off < end) {
+        uint32_t crc = 0xffffffff;
+        for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
+            // check against written crc, may catch blocks that
+            // become readonly and match our commit size exactly
+            if (i == off1 && crc != crc1) {
+                return LFS_ERR_CORRUPT;
+            }
+
+            // leave it up to caching to make this efficient
+            uint8_t dat;
+            err = lfs_bd_read(lfs,
+                    NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
+                    commit->block, i, &dat, 1);
+            if (err) {
+                return err;
+            }
+
+            crc = lfs_crc(crc, &dat, 1);
+        }
+
+        // detected write error?
+        if (crc != 0) {
+            return LFS_ERR_CORRUPT;
+        }
+
+        // skip padding
+        off = lfs_min(end - noff, 0x3fe) + noff;
+        if (off < end) {
+            off = lfs_min(off, end - 2*sizeof(uint32_t));
+        }
+        noff = off + sizeof(uint32_t);
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Allocate a fresh metadata pair and initialize dir's in-memory state.
+// Nothing is written to disk yet; the caller commits later.
+static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
+    // allocate pair of dir blocks (backwards, so we write block 1 first)
+    for (int i = 0; i < 2; i++) {
+        int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
+        if (err) {
+            return err;
+        }
+    }
+
+    // zero for reproducibility in case initial block is unreadable
+    dir->rev = 0;
+
+    // rather than clobbering one of the blocks we just pretend
+    // the revision may be valid
+    int err = lfs_bd_read(lfs,
+            NULL, &lfs->rcache, sizeof(dir->rev),
+            dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
+    dir->rev = lfs_fromle32(dir->rev);
+    if (err && err != LFS_ERR_CORRUPT) {
+        return err;
+    }
+
+    // to make sure we don't immediately evict, align the new revision count
+    // to our block_cycles modulus, see lfs_dir_compact for why our modulus
+    // is tweaked this way
+    if (lfs->cfg->block_cycles > 0) {
+        dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
+    }
+
+    // set defaults
+    dir->off = sizeof(dir->rev);
+    dir->etag = 0xffffffff;
+    dir->count = 0;
+    dir->tail[0] = LFS_BLOCK_NULL;
+    dir->tail[1] = LFS_BLOCK_NULL;
+    dir->erased = false;
+    dir->split = false;
+
+    // don't write out yet, let caller take care of that
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Remove tail from the metadata chain by absorbing its gstate and its
+// tail pointer into dir.
+static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
+    // absorb any gstate the dropped mdir was carrying
+    int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
+    if (err) {
+        return err;
+    }
+
+    // point dir past tail, stealing tail's own tail pointer
+    lfs_pair_tole32(tail->tail);
+    err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
+            {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
+    lfs_pair_fromle32(tail->tail);
+    return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Split half of a compaction off into a freshly allocated tail pair.
+// Entries [split,end) of source (plus matching attrs) are compacted into
+// the new tail, which is then linked in after dir.
+static int lfs_dir_split(lfs_t *lfs,
+        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *source, uint16_t split, uint16_t end) {
+    // create tail metadata pair
+    lfs_mdir_t tail;
+    int err = lfs_dir_alloc(lfs, &tail);
+    if (err) {
+        return err;
+    }
+
+    // the new tail inherits dir's place in the chain
+    tail.split = dir->split;
+    tail.tail[0] = dir->tail[0];
+    tail.tail[1] = dir->tail[1];
+
+    // note we don't care about LFS_OK_RELOCATED
+    int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
+    if (res < 0) {
+        return res;
+    }
+
+    dir->tail[0] = tail.pair[0];
+    dir->tail[1] = tail.pair[1];
+    dir->split = true;
+
+    // update root if needed
+    if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
+        lfs->root[0] = tail.pair[0];
+        lfs->root[1] = tail.pair[1];
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Traverse callback that accumulates the on-disk size of each tag into
+// the lfs_size_t pointed to by p.
+static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
+    (void)buffer;
+    lfs_size_t *size = p;
+    *size += lfs_tag_dsize(tag);
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Context for lfs_dir_commit_commit, binds an in-progress commit to its
+// filesystem.
+struct lfs_dir_commit_commit {
+    lfs_t *lfs;
+    struct lfs_commit *commit;
+};
+#endif
+
+#ifndef LFS_READONLY
+// Traverse callback that forwards each surviving tag into an
+// in-progress commit.
+static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
+    struct lfs_dir_commit_commit *commit = p;
+    lfs_t *lfs = commit->lfs;
+    return lfs_dir_commitattr(lfs, commit->commit, tag, buffer);
+}
+#endif
+
+#ifndef LFS_READONLY
+// Should this metadata pair be relocated for wear-leveling?
+//
+// If our revision count == n * block_cycles, we should force a relocation,
+// this is how littlefs wear-levels at the metadata-pair level. Note that we
+// actually use (block_cycles+1)|1, this is to avoid two corner cases:
+// 1. block_cycles = 1, which would prevent relocations from terminating
+// 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
+//    one metadata block in the pair, effectively making this useless
+static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
+    if (lfs->cfg->block_cycles <= 0) {
+        // metadata wear-leveling disabled
+        return false;
+    }
+
+    uint32_t modulus = (lfs->cfg->block_cycles+1) | 1;
+    return (dir->rev + 1) % modulus == 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Rewrite a metadata block from scratch, writing out entries [begin,end)
+// of source plus any pending attrs. Relocates to a freshly allocated
+// block on write errors or when wear-leveling kicks in. Returns
+// LFS_OK_RELOCATED if the pair moved, 0 on success, or a negative error.
+static int lfs_dir_compact(lfs_t *lfs,
+        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+    // save some state in case block is bad
+    bool relocated = false;
+    bool tired = lfs_dir_needsrelocation(lfs, dir);
+
+    // increment revision count
+    dir->rev += 1;
+
+    // do not proactively relocate blocks during migrations, this
+    // can cause a number of failure states such: clobbering the
+    // v1 superblock if we relocate root, and invalidating directory
+    // pointers if we relocate the head of a directory. On top of
+    // this, relocations increase the overall complexity of
+    // lfs_migration, which is already a delicate operation.
+#ifdef LFS_MIGRATE
+    if (lfs->lfs1) {
+        tired = false;
+    }
+#endif
+
+    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
+        // we're writing too much, time to relocate
+        goto relocate;
+    }
+
+    // begin loop to commit compaction to blocks until a compact sticks
+    while (true) {
+        {
+            // setup commit state
+            struct lfs_commit commit = {
+                .block = dir->pair[1],
+                .off = 0,
+                .ptag = 0xffffffff,
+                .crc = 0xffffffff,
+
+                .begin = 0,
+                .end = (lfs->cfg->metadata_max ?
+                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
+            };
+
+            // erase block to write to
+            int err = lfs_bd_erase(lfs, dir->pair[1]);
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // write out header
+            dir->rev = lfs_tole32(dir->rev);
+            err = lfs_dir_commitprog(lfs, &commit,
+                    &dir->rev, sizeof(dir->rev));
+            dir->rev = lfs_fromle32(dir->rev);
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // traverse the directory, this time writing out all unique tags
+            err = lfs_dir_traverse(lfs,
+                    source, 0, 0xffffffff, attrs, attrcount,
+                    LFS_MKTAG(0x400, 0x3ff, 0),
+                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+                    begin, end, -begin,
+                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
+                        lfs, &commit});
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // commit tail, which may be new after last size check
+            if (!lfs_pair_isnull(dir->tail)) {
+                lfs_pair_tole32(dir->tail);
+                err = lfs_dir_commitattr(lfs, &commit,
+                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
+                        dir->tail);
+                lfs_pair_fromle32(dir->tail);
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        goto relocate;
+                    }
+                    return err;
+                }
+            }
+
+            // bring over gstate?
+            lfs_gstate_t delta = {0};
+            if (!relocated) {
+                lfs_gstate_xor(&delta, &lfs->gdisk);
+                lfs_gstate_xor(&delta, &lfs->gstate);
+            }
+            lfs_gstate_xor(&delta, &lfs->gdelta);
+            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
+
+            err = lfs_dir_getgstate(lfs, dir, &delta);
+            if (err) {
+                return err;
+            }
+
+            if (!lfs_gstate_iszero(&delta)) {
+                lfs_gstate_tole32(&delta);
+                err = lfs_dir_commitattr(lfs, &commit,
+                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
+                            sizeof(delta)), &delta);
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        goto relocate;
+                    }
+                    return err;
+                }
+            }
+
+            // complete commit with crc
+            err = lfs_dir_commitcrc(lfs, &commit);
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // successful compaction, swap dir pair to indicate most recent
+            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
+            lfs_pair_swap(dir->pair);
+            dir->count = end - begin;
+            dir->off = commit.off;
+            dir->etag = commit.ptag;
+            // update gstate
+            lfs->gdelta = (lfs_gstate_t){0};
+            if (!relocated) {
+                lfs->gdisk = lfs->gstate;
+            }
+        }
+        break;
+
+relocate:
+        // commit was corrupted, drop caches and prepare to relocate block
+        relocated = true;
+        lfs_cache_drop(lfs, &lfs->pcache);
+        if (!tired) {
+            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
+        }
+
+        // can't relocate superblock, filesystem is now frozen
+        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
+            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
+                    dir->pair[1]);
+            return LFS_ERR_NOSPC;
+        }
+
+        // relocate half of pair
+        int err = lfs_alloc(lfs, &dir->pair[1]);
+        if (err && (err != LFS_ERR_NOSPC || !tired)) {
+            return err;
+        }
+
+        tired = false;
+        continue;
+    }
+
+    return relocated ? LFS_OK_RELOCATED : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+// Compact with splitting: if the metadata for [begin,end) won't fit in
+// one block, carve tail pairs off until it does, then compact what
+// remains. Also handles cautious superblock expansion.
+static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
+        const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+    while (true) {
+        // find size of first split, we do this by halving the split until
+        // the metadata is guaranteed to fit
+        //
+        // Note that this isn't a true binary search, we never increase the
+        // split size. This may result in poorly distributed metadata but isn't
+        // worth the extra code size or performance hit to fix.
+        lfs_size_t split = begin;
+        while (end - split > 1) {
+            lfs_size_t size = 0;
+            int err = lfs_dir_traverse(lfs,
+                    source, 0, 0xffffffff, attrs, attrcount,
+                    LFS_MKTAG(0x400, 0x3ff, 0),
+                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+                    split, end, -split,
+                    lfs_dir_commit_size, &size);
+            if (err) {
+                return err;
+            }
+
+            // space is complicated, we need room for tail, crc, gstate,
+            // cleanup delete, and we cap at half a block to give room
+            // for metadata updates.
+            if (end - split < 0xff
+                    && size <= lfs_min(lfs->cfg->block_size - 36,
+                        lfs_alignup(
+                            (lfs->cfg->metadata_max
+                                ? lfs->cfg->metadata_max
+                                : lfs->cfg->block_size)/2,
+                            lfs->cfg->prog_size))) {
+                break;
+            }
+
+            split = split + ((end - split) / 2);
+        }
+
+        if (split == begin) {
+            // no split needed
+            break;
+        }
+
+        // split into two metadata pairs and continue
+        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+                source, split, end);
+        if (err && err != LFS_ERR_NOSPC) {
+            return err;
+        }
+
+        if (err) {
+            // we can't allocate a new block, try to compact with degraded
+            // performance
+            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
+                    dir->pair[0], dir->pair[1]);
+            break;
+        } else {
+            end = split;
+        }
+    }
+
+    if (lfs_dir_needsrelocation(lfs, dir)
+            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
+        // oh no! we're writing too much to the superblock,
+        // should we expand?
+        lfs_ssize_t size = lfs_fs_rawsize(lfs);
+        if (size < 0) {
+            return size;
+        }
+
+        // do we have extra space? littlefs can't reclaim this space
+        // by itself, so expand cautiously
+        if ((lfs_size_t)size < lfs->cfg->block_count/2) {
+            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
+            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
+                    source, begin, end);
+            if (err && err != LFS_ERR_NOSPC) {
+                return err;
+            }
+
+            if (err) {
+                // welp, we tried, if we ran out of space there's not much
+                // we can do, we'll error later if we've become frozen
+                LFS_WARN("Unable to expand superblock");
+            } else {
+                end = begin;
+            }
+        }
+    }
+
+    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
+}
+#endif
+
#ifndef LFS_READONLY
// Commit attrs to the metadata pair dir, falling back to compaction
// (which may split or relocate the pair) when an in-place commit does not
// fit or the block fails.
//
// pair is the pair's original address, used to patch up any open handles
// in lfs->mlist after the commit. On a drop, pdir receives the
// predecessor pair that must absorb dir's tail.
//
// Returns a negative error code or a non-negative state: 0,
// LFS_OK_RELOCATED, or LFS_OK_DROPPED.
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_block_t pair[2],
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *pdir) {
    int state = 0;

    // calculate changes to the directory
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            // tail attrs update our in-RAM copy of the tail immediately
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        LFS_ASSERT(pdir);
        int err = lfs_fs_pred(lfs, dir->pair, pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // only drop if a predecessor exists and is chained to us,
        // otherwise fall through to a normal commit
        if (err != LFS_ERR_NOENT && pdir->split) {
            state = LFS_OK_DROPPED;
            goto fixmlist;
        }
    }

    if (dir->erased) {
        // pair is in an appendable state, try an in-place commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            // NOTE(review): the -8 presumably reserves room for the
            // trailing commit tag+crc — confirm against the commit layout
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                // out of room or bad block, compact instead
                goto compact;
            }
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};

        goto fixmlist;
    }

compact:
    // fall back to compaction
    lfs_cache_drop(lfs, &lfs->pcache);

    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
            dir, 0, dir->count);
    if (state < 0) {
        return state;
    }

    goto fixmlist;

fixmlist:;
    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    lfs_block_t oldpair[2] = {pair[0], pair[1]};
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
            d->m = *dir;
            if (d->m.pair != pair) {
                // shift open handles' ids past any creates/deletes that
                // landed in this commit
                for (int i = 0; i < attrcount; i++) {
                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id == lfs_tag_id(attrs[i].tag)) {
                        // the handle's own entry was deleted, mark it gone
                        d->m.pair[0] = LFS_BLOCK_NULL;
                        d->m.pair[1] = LFS_BLOCK_NULL;
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id > lfs_tag_id(attrs[i].tag)) {
                        d->id -= 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos -= 1;
                        }
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                            d->id >= lfs_tag_id(attrs[i].tag)) {
                        d->id += 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos += 1;
                        }
                    }
                }
            }

            // if the pair was split, the handle's id may now live on a
            // tail pair, follow the chain until it fits
            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return state;
}
#endif
+
#ifndef LFS_READONLY
// Commit attrs to dir, handling the filesystem-level fallout of a
// relocating commit: evicts inline files that share the pair but aren't
// RAM backed, steals the tail of a dropped pair, and walks relocations
// up through parents/predecessors iteratively to bound stack usage.
//
// Returns a negative error code, 0, or LFS_OK_ORPHANED if orphans may
// have been created and a later deorphan pass is required.
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // commit on a copy so dir itself is only updated if the pair keeps
    // its address (see below)
    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
    lfs_mdir_t ldir = *dir;
    lfs_mdir_t pdir;
    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
            attrs, attrcount, &pdir);
    if (state < 0) {
        return state;
    }

    // update if we're not in mlist, note we may have already been
    // updated if we are in mlist
    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
        *dir = ldir;
    }

    // commit was successful, but may require other changes in the
    // filesystem, these would normally be tail recursive, but we have
    // flattened them here avoid unbounded stack usage

    // need to drop?
    if (state == LFS_OK_DROPPED) {
        // steal state
        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
        if (err) {
            return err;
        }

        // steal tail, note that this can't create a recursive drop
        lpair[0] = pdir.pair[0];
        lpair[1] = pdir.pair[1];
        lfs_pair_tole32(dir->tail);
        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail}),
                NULL);
        lfs_pair_fromle32(dir->tail);
        if (state < 0) {
            return state;
        }

        ldir = pdir;
    }

    // need to relocate?
    bool orphans = false;
    while (state == LFS_OK_RELOCATED) {
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
        state = 0;

        // update internal root
        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
            lfs->root[0] = ldir.pair[0];
            lfs->root[1] = ldir.pair[1];
        }

        // update internally tracked dirs
        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
                d->m.pair[0] = ldir.pair[0];
                d->m.pair[1] = ldir.pair[1];
            }

            if (d->type == LFS_TYPE_DIR &&
                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
            }
        }

        // find parent
        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
        if (tag < 0 && tag != LFS_ERR_NOENT) {
            return tag;
        }

        bool hasparent = (tag != LFS_ERR_NOENT);
        if (tag != LFS_ERR_NOENT) {
            // note that if we have a parent, we must have a pred, so this will
            // always create an orphan
            int err = lfs_fs_preporphans(lfs, +1);
            if (err) {
                return err;
            }

            // fix pending move in this pair? this looks like an optimization but
            // is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
                if (moveid < lfs_tag_id(tag)) {
                    tag -= LFS_MKTAG(0, 1, 0);
                }
            }

            // point the parent's entry at our new address
            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {tag, ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            if (state == LFS_OK_RELOCATED) {
                // the parent relocated too, continue walking up the tree
                lpair[0] = ppair[0];
                lpair[1] = ppair[1];
                ldir = pdir;
                orphans = true;
                continue;
            }
        }

        // find pred
        int err = lfs_fs_pred(lfs, lpair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }
        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

        // if we can't find dir, it must be new
        if (err != LFS_ERR_NOENT) {
            if (lfs_gstate_hasorphans(&lfs->gstate)) {
                // next step, clean up orphans
                err = lfs_fs_preporphans(lfs, -hasparent);
                if (err) {
                    return err;
                }
            }

            // fix pending move in this pair? this looks like an optimization
            // but is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
            }

            // replace bad pair, either we clean up desync, or no desync occured
            lpair[0] = pdir.pair[0];
            lpair[1] = pdir.pair[1];
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
                            ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            ldir = pdir;
        }
    }

    return orphans ? LFS_OK_ORPHANED : 0;
}
#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
+ const struct lfs_mattr *attrs, int attrcount) {
+ int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
+ if (orphans < 0) {
+ return orphans;
+ }
+
+ if (orphans) {
+ // make sure we've removed all orphans, this is a noop if there
+ // are none, but if we had nested blocks failures we may have
+ // created some
+ int err = lfs_fs_deorphan(lfs, false);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+
+/// Top level directory operations ///
#ifndef LFS_READONLY
// Create a directory at path: allocates a fresh metadata pair, links it
// into the end of the metadata linked-list, and inserts a dir entry into
// the parent pair.
static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    // only a "not found in the final parent" result lets us create,
    // anything else is an error or an already-existing entry
    if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block not end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
#endif
+
+static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
+ lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
+ return LFS_ERR_NOTDIR;
+ }
+
+ lfs_block_t pair[2];
+ if (lfs_tag_id(tag) == 0x3ff) {
+ // handle root dir separately
+ pair[0] = lfs->root[0];
+ pair[1] = lfs->root[1];
+ } else {
+ // get dir pair from parent
+ lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
+ if (res < 0) {
+ return res;
+ }
+ lfs_pair_fromle32(pair);
+ }
+
+ // fetch first pair
+ int err = lfs_dir_fetch(lfs, &dir->m, pair);
+ if (err) {
+ return err;
+ }
+
+ // setup entry
+ dir->head[0] = dir->m.pair[0];
+ dir->head[1] = dir->m.pair[1];
+ dir->id = 0;
+ dir->pos = 0;
+
+ // add to list of mdirs
+ dir->type = LFS_TYPE_DIR;
+ lfs_mlist_append(lfs, (struct lfs_mlist *)dir);
+
+ return 0;
+}
+
+static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
+ // remove from list of mdirs
+ lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
+
+ return 0;
+}
+
+static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
+ memset(info, 0, sizeof(*info));
+
+ // special offset for '.' and '..'
+ if (dir->pos == 0) {
+ info->type = LFS_TYPE_DIR;
+ strcpy(info->name, ".");
+ dir->pos += 1;
+ return true;
+ } else if (dir->pos == 1) {
+ info->type = LFS_TYPE_DIR;
+ strcpy(info->name, "..");
+ dir->pos += 1;
+ return true;
+ }
+
+ while (true) {
+ if (dir->id == dir->m.count) {
+ if (!dir->m.split) {
+ return false;
+ }
+
+ int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
+ if (err) {
+ return err;
+ }
+
+ dir->id = 0;
+ }
+
+ int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+
+ dir->id += 1;
+ if (err != LFS_ERR_NOENT) {
+ break;
+ }
+ }
+
+ dir->pos += 1;
+ return true;
+}
+
// Seek a directory iterator to logical offset off by rewinding to the
// head pair and walking forward. off counts the synthetic '.' and '..'
// entries as positions 0 and 1.
static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rawrewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    // skip superblock entry
    dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (off > 0) {
        // consume as much of the current metadata pair as possible
        int diff = lfs_min(dir->m.count - dir->id, off);
        dir->id += diff;
        dir->pos += diff;
        off -= diff;

        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // seeking past the end of the directory is invalid
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }
    }

    return 0;
}
+
+static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
+ (void)lfs;
+ return dir->pos;
+}
+
+static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
+ // reload the head dir
+ int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
+ if (err) {
+ return err;
+ }
+
+ dir->id = 0;
+ dir->pos = 0;
+ return 0;
+}
+
+
+/// File index list operations ///
+static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
+ lfs_off_t size = *off;
+ lfs_off_t b = lfs->cfg->block_size - 2*4;
+ lfs_off_t i = size / b;
+ if (i == 0) {
+ return 0;
+ }
+
+ i = (size - 4*(lfs_popc(i-1)+2)) / b;
+ *off = size - b*i - 4*lfs_popc(i);
+ return i;
+}
+
// Find the block address and in-block offset of byte position pos in a
// CTZ skip-list with the given head block and total size.
//
// Walks backwards from the last block's index toward the target index,
// each step following the largest skip pointer that doesn't overshoot.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        // empty list, no block to report
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    while (current > target) {
        // largest power-of-two jump that neither overshoots the target
        // nor exceeds the pointers stored in the current block
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        // follow the skip pointer stored at word 'skip' (little-endian)
        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*skip, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        current -= 1 << skip;
    }

    // pos was already reduced to an in-block offset by lfs_ctz_index
    *block = head;
    *off = pos;
    return 0;
}
+
#ifndef LFS_READONLY
// Allocate and prepare the next block of a CTZ skip-list with the given
// head and size. Returns the new block and the offset where writing may
// continue: a copy of a partially-filled last block, or a fresh block
// prefixed with its skip pointers. Bad blocks are retried with a new
// allocation.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // first block of the file, nothing to link
                *block = nblock;
                *off = 0;
                return 0;
            }

            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // the new block stores ctz(index)+1 skip pointers
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the same-level pointer in the block we just
                    // linked to find the next level's target
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
#endif
+
// Call cb for every block in a CTZ skip-list with the given head and
// size, walking backwards from the last block via its stored pointers.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        // empty list, nothing to visit
        return 0;
    }

    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        // read one or two pointers at once, two when the index is even,
        // letting us report an extra block per read
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        // NOTE(review): when count == 1 only heads[0] is read from disk;
        // heads[1] is byte-swapped while uninitialized but never used in
        // that case
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
+
+
+/// Top level file operations ///
// Open the file at path with the given flags and per-file config:
// creates the dir entry if needed, registers the handle in lfs->mlist,
// fetches user attrs, and sets up the file cache (loading inline
// contents when present).
static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));

        // it may happen that the file name doesn't fit in the metadata
        // blocks, e.g., a 256 byte file name will not fit in a 128 byte
        // block.
        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
        if (err) {
            goto cleanup;
        }

        // new files start out as zero-size inline files
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                    file->cfg->attrs[i].buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            // mark dirty so the attrs get written out on the next sync
            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_rawclose(lfs, file);
    return err;
}
+
+#ifndef LFS_NO_MALLOC
+static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags) {
+ static const struct lfs_file_config defaults = {0};
+ int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
+ return err;
+}
+#endif
+
+static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
+#ifndef LFS_READONLY
+ int err = lfs_file_rawsync(lfs, file);
+#else
+ int err = 0;
+#endif
+
+ // remove from list of mdirs
+ lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
+
+ // clean up memory
+ if (!file->cfg->buffer) {
+ lfs_free(file->cache.buffer);
+ }
+
+ return err;
+}
+
+
#ifndef LFS_READONLY
// Move the contents of the file's currently-open block into a freshly
// allocated block, copying the first file->off bytes from either the
// inline metadata entry or the old block. Retries with new allocations
// on LFS_ERR_CORRUPT (bad block).
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file, adopting the pcache contents as
        // the file's own cache
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
#endif
+
+#ifndef LFS_READONLY
+static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
+ file->off = file->pos;
+ lfs_alloc_ack(lfs);
+ int err = lfs_file_relocate(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ file->flags &= ~LFS_F_INLINE;
+ return 0;
+}
+#endif
+
// Flush any pending read/write state so file->ctz and file->block
// describe the file's current contents. Reads just drop the cache; a
// pending write copies the rest of the old file forward, flushes the
// cache (relocating on bad blocks), and updates the ctz head/size.
// Metadata is not committed here — that happens in lfs_file_rawsync.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            // inline file, data stays in the cache until the metadata
            // commit; just track the furthest size written
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }
#endif

    return 0;
}
+
#ifndef LFS_READONLY
// Flush the file and commit its metadata (inline data or ctz reference,
// plus user attrs) to the file's metadata pair. Files in the errored
// state are left untouched.
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    // only commit if dirty and the entry still exists (the pair is
    // nulled out if the file is deleted out from under us)
    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
#endif
+
// Read up to size bytes at the file's current position into buffer,
// assuming any pending writes have already been flushed. Returns the
// number of bytes read (short only at end-of-file) or a negative error.
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp the read to the remaining file contents
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                // inline files live in the metadata pair and are
                // addressed directly by file position
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
+
+static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size) {
+ LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
+
+#ifndef LFS_READONLY
+ if (file->flags & LFS_F_WRITING) {
+ // flush out any writes
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+ }
+#endif
+
+ return lfs_file_flushedread(lfs, file, buffer, size);
+}
+
+
#ifndef LFS_READONLY
// Write size bytes from buffer at the file's current position, assuming
// any pending reads have been dropped. Outlines inline files that grow
// past the inline limit, extends the ctz list as block boundaries are
// crossed, and relocates on bad blocks. Returns size or a negative
// error.
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) >
            lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        // NOTE(review): appears to acknowledge allocation progress to
        // the allocator each iteration — confirm against lfs_alloc_ack
        lfs_alloc_ack(lfs);
    }

    return size;
}
+
+static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size) {
+ LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
+
+ if (file->flags & LFS_F_READING) {
+ // drop any reads
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+ }
+
+ if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
+ file->pos = file->ctz.size;
+ }
+
+ if (file->pos + size > lfs->file_max) {
+ // Larger than file limit?
+ return LFS_ERR_FBIG;
+ }
+
+ if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
+ // fill with zeros
+ lfs_off_t pos = file->pos;
+ file->pos = file->ctz.size;
+
+ while (file->pos < pos) {
+ lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
+ if (res < 0) {
+ return res;
+ }
+ }
+ }
+
+ lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
+ if (nsize < 0) {
+ return nsize;
+ }
+
+ file->flags &= ~LFS_F_ERRED;
+ return nsize;
+}
+#endif
+
+static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence) {
+ // find new pos
+ lfs_off_t npos = file->pos;
+ if (whence == LFS_SEEK_SET) {
+ npos = off;
+ } else if (whence == LFS_SEEK_CUR) {
+ if ((lfs_soff_t)file->pos + off < 0) {
+ return LFS_ERR_INVAL;
+ } else {
+ npos = file->pos + off;
+ }
+ } else if (whence == LFS_SEEK_END) {
+ lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
+ if (res < 0) {
+ return LFS_ERR_INVAL;
+ } else {
+ npos = res;
+ }
+ }
+
+ if (npos > lfs->file_max) {
+ // file position out of range
+ return LFS_ERR_INVAL;
+ }
+
+ if (file->pos == npos) {
+ // noop - position has not changed
+ return npos;
+ }
+
+ // if we're only reading and our new offset is still in the file's cache
+ // we can avoid flushing and needing to reread the data
+ if (
+#ifndef LFS_READONLY
+ !(file->flags & LFS_F_WRITING)
+#else
+ true
+#endif
+ ) {
+ int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
+ lfs_off_t noff = npos;
+ int nindex = lfs_ctz_index(lfs, &noff);
+ if (oindex == nindex
+ && noff >= file->cache.off
+ && noff < file->cache.off + file->cache.size) {
+ file->pos = npos;
+ file->off = noff;
+ return npos;
+ }
+ }
+
+ // write out everything beforehand, may be noop if rdonly
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ // update pos
+ file->pos = npos;
+ return npos;
+}
+
+#ifndef LFS_READONLY
+static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
+ LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
+
+ if (size > LFS_FILE_MAX) {
+ return LFS_ERR_INVAL;
+ }
+
+ lfs_off_t pos = file->pos;
+ lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
+ if (size < oldsize) {
+ // need to flush since directly changing metadata
+ int err = lfs_file_flush(lfs, file);
+ if (err) {
+ return err;
+ }
+
+ // lookup new head in ctz skip list
+ err = lfs_ctz_find(lfs, NULL, &file->cache,
+ file->ctz.head, file->ctz.size,
+ size, &file->block, &file->off);
+ if (err) {
+ return err;
+ }
+
+ // need to set pos/block/off consistently so seeking back to
+ // the old position does not get confused
+ file->pos = size;
+ file->ctz.head = file->block;
+ file->ctz.size = size;
+ file->flags |= LFS_F_DIRTY | LFS_F_READING;
+ } else if (size > oldsize) {
+ // flush+seek if not already at end
+ lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ // fill with zeros
+ while (file->pos < size) {
+ res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
+ if (res < 0) {
+ return (int)res;
+ }
+ }
+ }
+
+ // restore pos
+ lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ return 0;
+}
+#endif
+
+static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
+ (void)lfs;
+ return file->pos;
+}
+
+static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
+ lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
+ if (res < 0) {
+ return (int)res;
+ }
+
+ return 0;
+}
+
+static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
+ (void)lfs;
+
+#ifndef LFS_READONLY
+ if (file->flags & LFS_F_WRITING) {
+ return lfs_max(file->pos, file->ctz.size);
+ }
+#endif
+
+ return file->ctz.size;
+}
+
+
+/// General fs operations ///
+static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return (int)tag;
+ }
+
+ return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
+}
+
+#ifndef LFS_READONLY
+static int lfs_rawremove(lfs_t *lfs, const char *path) {
+ // deorphan if we haven't yet, needed at most once after poweron
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
+ return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
+ }
+
+ struct lfs_mlist dir;
+ dir.next = lfs->mlist;
+ if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+ // must be empty before removal
+ lfs_block_t pair[2];
+ lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
+ if (res < 0) {
+ return (int)res;
+ }
+ lfs_pair_fromle32(pair);
+
+ err = lfs_dir_fetch(lfs, &dir.m, pair);
+ if (err) {
+ return err;
+ }
+
+ if (dir.m.count > 0 || dir.m.split) {
+ return LFS_ERR_NOTEMPTY;
+ }
+
+ // mark fs as orphaned
+ err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // I know it's crazy but yes, dir can be changed by our parent's
+ // commit (if predecessor is child)
+ dir.type = 0;
+ dir.id = 0;
+ lfs->mlist = &dir;
+ }
+
+ // delete the entry
+ err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
+ if (err) {
+ lfs->mlist = dir.next;
+ return err;
+ }
+
+ lfs->mlist = dir.next;
+ if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+ // fix orphan
+ err = lfs_fs_preporphans(lfs, -1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_drop(lfs, &cwd, &dir.m);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
+ // deorphan if we haven't yet, needed at most once after poweron
+ int err = lfs_fs_forceconsistency(lfs);
+ if (err) {
+ return err;
+ }
+
+ // find old entry
+ lfs_mdir_t oldcwd;
+ lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
+ if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
+ return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
+ }
+
+ // find new entry
+ lfs_mdir_t newcwd;
+ uint16_t newid;
+ lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
+ if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
+ !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
+ return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
+ }
+
+ // if we're in the same pair there's a few special cases...
+ bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
+ uint16_t newoldid = lfs_tag_id(oldtag);
+
+ struct lfs_mlist prevdir;
+ prevdir.next = lfs->mlist;
+ if (prevtag == LFS_ERR_NOENT) {
+ // check that name fits
+ lfs_size_t nlen = strlen(newpath);
+ if (nlen > lfs->name_max) {
+ return LFS_ERR_NAMETOOLONG;
+ }
+
+ // there is a small chance we are being renamed in the same
+ // directory/ to an id less than our old id, the global update
+ // to handle this is a bit messy
+ if (samepair && newid <= newoldid) {
+ newoldid += 1;
+ }
+ } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
+ return LFS_ERR_ISDIR;
+ } else if (samepair && newid == newoldid) {
+ // we're renaming to ourselves??
+ return 0;
+ } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
+ // must be empty before removal
+ lfs_block_t prevpair[2];
+ lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
+ if (res < 0) {
+ return (int)res;
+ }
+ lfs_pair_fromle32(prevpair);
+
+ // must be empty before removal
+ err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
+ if (err) {
+ return err;
+ }
+
+ if (prevdir.m.count > 0 || prevdir.m.split) {
+ return LFS_ERR_NOTEMPTY;
+ }
+
+ // mark fs as orphaned
+ err = lfs_fs_preporphans(lfs, +1);
+ if (err) {
+ return err;
+ }
+
+ // I know it's crazy but yes, dir can be changed by our parent's
+ // commit (if predecessor is child)
+ prevdir.type = 0;
+ prevdir.id = 0;
+ lfs->mlist = &prevdir;
+ }
+
+ if (!samepair) {
+ lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
+ }
+
+ // move over all attributes
+ err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
+ {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
+ LFS_TYPE_DELETE, newid, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
+ {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
+ {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
+ {LFS_MKTAG_IF(samepair,
+ LFS_TYPE_DELETE, newoldid, 0), NULL}));
+ if (err) {
+ lfs->mlist = prevdir.next;
+ return err;
+ }
+
+ // let commit clean up after move (if we're different! otherwise move
+ // logic already fixed it for us)
+ if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
+ // prep gstate and delete move id
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
+ if (err) {
+ lfs->mlist = prevdir.next;
+ return err;
+ }
+ }
+
+ lfs->mlist = prevdir.next;
+ if (prevtag != LFS_ERR_NOENT
+ && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
+ // fix orphan
+ err = lfs_fs_preporphans(lfs, -1);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
+ if (err) {
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ uint16_t id = lfs_tag_id(tag);
+ if (id == 0x3ff) {
+ // special case for root
+ id = 0;
+ int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
+ if (err) {
+ return err;
+ }
+ }
+
+ tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_USERATTR + type,
+ id, lfs_min(size, lfs->attr_max)),
+ buffer);
+ if (tag < 0) {
+ if (tag == LFS_ERR_NOENT) {
+ return LFS_ERR_NOATTR;
+ }
+
+ return tag;
+ }
+
+ return lfs_tag_size(tag);
+}
+
+#ifndef LFS_READONLY
+static int lfs_commitattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ lfs_mdir_t cwd;
+ lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
+ if (tag < 0) {
+ return tag;
+ }
+
+ uint16_t id = lfs_tag_id(tag);
+ if (id == 0x3ff) {
+ // special case for root
+ id = 0;
+ int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
+ if (err) {
+ return err;
+ }
+ }
+
+ return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_rawsetattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ if (size > lfs->attr_max) {
+ return LFS_ERR_NOSPC;
+ }
+
+ return lfs_commitattr(lfs, path, type, buffer, size);
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
+ return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
+}
+#endif
+
+
+/// Filesystem operations ///
+static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
+ lfs->cfg = cfg;
+ int err = 0;
+
+ // validate that the lfs-cfg sizes were initiated properly before
+    // performing any arithmetic with them
+ LFS_ASSERT(lfs->cfg->read_size != 0);
+ LFS_ASSERT(lfs->cfg->prog_size != 0);
+ LFS_ASSERT(lfs->cfg->cache_size != 0);
+
+ // check that block size is a multiple of cache size is a multiple
+ // of prog and read sizes
+ LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
+ LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
+ LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
+
+ // check that the block size is large enough to fit ctz pointers
+ LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
+ <= lfs->cfg->block_size);
+
+ // block_cycles = 0 is no longer supported.
+ //
+ // block_cycles is the number of erase cycles before littlefs evicts
+ // metadata logs as a part of wear leveling. Suggested values are in the
+ // range of 100-1000, or set block_cycles to -1 to disable block-level
+ // wear-leveling.
+ LFS_ASSERT(lfs->cfg->block_cycles != 0);
+
+
+ // setup read cache
+ if (lfs->cfg->read_buffer) {
+ lfs->rcache.buffer = lfs->cfg->read_buffer;
+ } else {
+ lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
+ if (!lfs->rcache.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // setup program cache
+ if (lfs->cfg->prog_buffer) {
+ lfs->pcache.buffer = lfs->cfg->prog_buffer;
+ } else {
+ lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
+ if (!lfs->pcache.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // zero to avoid information leaks
+ lfs_cache_zero(lfs, &lfs->rcache);
+ lfs_cache_zero(lfs, &lfs->pcache);
+
+ // setup lookahead, must be multiple of 64-bits, 32-bit aligned
+ LFS_ASSERT(lfs->cfg->lookahead_size > 0);
+ LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
+ (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
+ if (lfs->cfg->lookahead_buffer) {
+ lfs->free.buffer = lfs->cfg->lookahead_buffer;
+ } else {
+ lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
+ if (!lfs->free.buffer) {
+ err = LFS_ERR_NOMEM;
+ goto cleanup;
+ }
+ }
+
+ // check that the size limits are sane
+ LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
+ lfs->name_max = lfs->cfg->name_max;
+ if (!lfs->name_max) {
+ lfs->name_max = LFS_NAME_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
+ lfs->file_max = lfs->cfg->file_max;
+ if (!lfs->file_max) {
+ lfs->file_max = LFS_FILE_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
+ lfs->attr_max = lfs->cfg->attr_max;
+ if (!lfs->attr_max) {
+ lfs->attr_max = LFS_ATTR_MAX;
+ }
+
+ LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
+
+ // setup default state
+ lfs->root[0] = LFS_BLOCK_NULL;
+ lfs->root[1] = LFS_BLOCK_NULL;
+ lfs->mlist = NULL;
+ lfs->seed = 0;
+ lfs->gdisk = (lfs_gstate_t){0};
+ lfs->gstate = (lfs_gstate_t){0};
+ lfs->gdelta = (lfs_gstate_t){0};
+#ifdef LFS_MIGRATE
+ lfs->lfs1 = NULL;
+#endif
+
+ return 0;
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+}
+
+static int lfs_deinit(lfs_t *lfs) {
+ // free allocated memory
+ if (!lfs->cfg->read_buffer) {
+ lfs_free(lfs->rcache.buffer);
+ }
+
+ if (!lfs->cfg->prog_buffer) {
+ lfs_free(lfs->pcache.buffer);
+ }
+
+ if (!lfs->cfg->lookahead_buffer) {
+ lfs_free(lfs->free.buffer);
+ }
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = 0;
+ {
+ err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ // create free lookahead
+ memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
+ lfs->free.off = 0;
+ lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
+ lfs->cfg->block_count);
+ lfs->free.i = 0;
+ lfs_alloc_ack(lfs);
+
+ // create root dir
+ lfs_mdir_t root;
+ err = lfs_dir_alloc(lfs, &root);
+ if (err) {
+ goto cleanup;
+ }
+
+ // write one superblock
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION,
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->cfg->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ goto cleanup;
+ }
+
+ // force compaction to prevent accidentally mounting any
+ // older version of littlefs that may live on disk
+ root.erased = false;
+ err = lfs_dir_commit(lfs, &root, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+
+ // sanity check that fetch works
+ err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+
+}
+#endif
+
+static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ // scan directory blocks for superblock and any global updates
+ lfs_mdir_t dir = {.tail = {0, 1}};
+ lfs_block_t cycle = 0;
+ while (!lfs_pair_isnull(dir.tail)) {
+ if (cycle >= lfs->cfg->block_count/2) {
+ // loop detected
+ err = LFS_ERR_CORRUPT;
+ goto cleanup;
+ }
+ cycle += 1;
+
+ // fetch next block in tail list
+ lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
+ LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
+ NULL,
+ lfs_dir_find_match, &(struct lfs_dir_find_match){
+ lfs, "littlefs", 8});
+ if (tag < 0) {
+ err = tag;
+ goto cleanup;
+ }
+
+ // has superblock?
+ if (tag && !lfs_tag_isdelete(tag)) {
+ // update root
+ lfs->root[0] = dir.pair[0];
+ lfs->root[1] = dir.pair[1];
+
+ // grab superblock
+ lfs_superblock_t superblock;
+ tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock);
+ if (tag < 0) {
+ err = tag;
+ goto cleanup;
+ }
+ lfs_superblock_fromle32(&superblock);
+
+ // check version
+ uint16_t major_version = (0xffff & (superblock.version >> 16));
+ uint16_t minor_version = (0xffff & (superblock.version >> 0));
+ if ((major_version != LFS_DISK_VERSION_MAJOR ||
+ minor_version > LFS_DISK_VERSION_MINOR)) {
+ LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
+ major_version, minor_version);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ // check superblock configuration
+ if (superblock.name_max) {
+ if (superblock.name_max > lfs->name_max) {
+ LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
+ superblock.name_max, lfs->name_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->name_max = superblock.name_max;
+ }
+
+ if (superblock.file_max) {
+ if (superblock.file_max > lfs->file_max) {
+ LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
+ superblock.file_max, lfs->file_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->file_max = superblock.file_max;
+ }
+
+ if (superblock.attr_max) {
+ if (superblock.attr_max > lfs->attr_max) {
+ LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
+ superblock.attr_max, lfs->attr_max);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ lfs->attr_max = superblock.attr_max;
+ }
+
+ if (superblock.block_count != lfs->cfg->block_count) {
+ LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
+ superblock.block_count, lfs->cfg->block_count);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ if (superblock.block_size != lfs->cfg->block_size) {
+ LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
+ superblock.block_size, lfs->cfg->block_size);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+ }
+
+ // has gstate?
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // found superblock?
+ if (lfs_pair_isnull(lfs->root)) {
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ // update littlefs with gstate
+ if (!lfs_gstate_iszero(&lfs->gstate)) {
+ LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
+ lfs->gstate.tag,
+ lfs->gstate.pair[0],
+ lfs->gstate.pair[1]);
+ }
+ lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
+ lfs->gdisk = lfs->gstate;
+
+ // setup free lookahead, to distribute allocations uniformly across
+ // boots, we start the allocator at a random location
+ lfs->free.off = lfs->seed % lfs->cfg->block_count;
+ lfs_alloc_drop(lfs);
+
+ return 0;
+
+cleanup:
+ lfs_rawunmount(lfs);
+ return err;
+}
+
+static int lfs_rawunmount(lfs_t *lfs) {
+ return lfs_deinit(lfs);
+}
+
+
+/// Filesystem-level filesystem operations ///
+int lfs_fs_rawtraverse(lfs_t *lfs,
+ int (*cb)(void *data, lfs_block_t block), void *data,
+ bool includeorphans) {
+ // iterate over metadata pairs
+ lfs_mdir_t dir = {.tail = {0, 1}};
+
+#ifdef LFS_MIGRATE
+ // also consider v1 blocks during migration
+ if (lfs->lfs1) {
+ int err = lfs1_traverse(lfs, cb, data);
+ if (err) {
+ return err;
+ }
+
+ dir.tail[0] = lfs->root[0];
+ dir.tail[1] = lfs->root[1];
+ }
+#endif
+
+ lfs_block_t cycle = 0;
+ while (!lfs_pair_isnull(dir.tail)) {
+ if (cycle >= lfs->cfg->block_count/2) {
+ // loop detected
+ return LFS_ERR_CORRUPT;
+ }
+ cycle += 1;
+
+ for (int i = 0; i < 2; i++) {
+ int err = cb(data, dir.tail[i]);
+ if (err) {
+ return err;
+ }
+ }
+
+ // iterate through ids in directory
+ int err = lfs_dir_fetch(lfs, &dir, dir.tail);
+ if (err) {
+ return err;
+ }
+
+ for (uint16_t id = 0; id < dir.count; id++) {
+ struct lfs_ctz ctz;
+ lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
+ if (tag < 0) {
+ if (tag == LFS_ERR_NOENT) {
+ continue;
+ }
+ return tag;
+ }
+ lfs_ctz_fromle32(&ctz);
+
+ if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
+ err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
+ ctz.head, ctz.size, cb, data);
+ if (err) {
+ return err;
+ }
+ } else if (includeorphans &&
+ lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
+ for (int i = 0; i < 2; i++) {
+ err = cb(data, (&ctz.head)[i]);
+ if (err) {
+ return err;
+ }
+ }
+ }
+ }
+ }
+
+#ifndef LFS_READONLY
+ // iterate over any open files
+ for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
+ if (f->type != LFS_TYPE_REG) {
+ continue;
+ }
+
+ if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
+ int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+ f->ctz.head, f->ctz.size, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+
+ if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
+ int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+ f->block, f->pos, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+ }
+#endif
+
+ return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_fs_pred(lfs_t *lfs,
+ const lfs_block_t pair[2], lfs_mdir_t *pdir) {
+    // iterate over all directory entries
+ pdir->tail[0] = 0;
+ pdir->tail[1] = 1;
+ lfs_block_t cycle = 0;
+ while (!lfs_pair_isnull(pdir->tail)) {
+ if (cycle >= lfs->cfg->block_count/2) {
+ // loop detected
+ return LFS_ERR_CORRUPT;
+ }
+ cycle += 1;
+
+ if (lfs_pair_cmp(pdir->tail, pair) == 0) {
+ return 0;
+ }
+
+ int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
+ if (err) {
+ return err;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+#endif
+
+#ifndef LFS_READONLY
+struct lfs_fs_parent_match {
+ lfs_t *lfs;
+ const lfs_block_t pair[2];
+};
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_parent_match(void *data,
+ lfs_tag_t tag, const void *buffer) {
+ struct lfs_fs_parent_match *find = data;
+ lfs_t *lfs = find->lfs;
+ const struct lfs_diskoff *disk = buffer;
+ (void)tag;
+
+ lfs_block_t child[2];
+ int err = lfs_bd_read(lfs,
+ &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
+ disk->block, disk->off, &child, sizeof(child));
+ if (err) {
+ return err;
+ }
+
+ lfs_pair_fromle32(child);
+ return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
+}
+#endif
+
+#ifndef LFS_READONLY
+static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
+ lfs_mdir_t *parent) {
+ // use fetchmatch with callback to find pairs
+ parent->tail[0] = 0;
+ parent->tail[1] = 1;
+ lfs_block_t cycle = 0;
+ while (!lfs_pair_isnull(parent->tail)) {
+ if (cycle >= lfs->cfg->block_count/2) {
+ // loop detected
+ return LFS_ERR_CORRUPT;
+ }
+ cycle += 1;
+
+ lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
+ LFS_MKTAG(0x7ff, 0, 0x3ff),
+ LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
+ NULL,
+ lfs_fs_parent_match, &(struct lfs_fs_parent_match){
+ lfs, {pair[0], pair[1]}});
+ if (tag && tag != LFS_ERR_NOENT) {
+ return tag;
+ }
+ }
+
+ return LFS_ERR_NOENT;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
+ LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
+ lfs->gstate.tag += orphans;
+ lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
+ ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static void lfs_fs_prepmove(lfs_t *lfs,
+ uint16_t id, const lfs_block_t pair[2]) {
+ lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
+ ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
+ lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
+ lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_demove(lfs_t *lfs) {
+ if (!lfs_gstate_hasmove(&lfs->gdisk)) {
+ return 0;
+ }
+
+ // Fix bad moves
+ LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
+ lfs->gdisk.pair[0],
+ lfs->gdisk.pair[1],
+ lfs_tag_id(lfs->gdisk.tag));
+
+ // fetch and delete the moved entry
+ lfs_mdir_t movedir;
+ int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
+ if (err) {
+ return err;
+ }
+
+ // prep gstate and delete move id
+ uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
+ if (!lfs_gstate_hasorphans(&lfs->gstate)) {
+ return 0;
+ }
+
+ int8_t found = 0;
+restart:
+ {
+ // Fix any orphans
+ lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
+ lfs_mdir_t dir;
+
+        // iterate over all directory entries
+ while (!lfs_pair_isnull(pdir.tail)) {
+ int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
+ if (err) {
+ return err;
+ }
+
+ // check head blocks for orphans
+ if (!pdir.split) {
+ // check if we have a parent
+ lfs_mdir_t parent;
+ lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
+ if (tag < 0 && tag != LFS_ERR_NOENT) {
+ return tag;
+ }
+
+ // note we only check for full orphans if we may have had a
+ // power-loss, otherwise orphans are created intentionally
+ // during operations such as lfs_mkdir
+ if (tag == LFS_ERR_NOENT && powerloss) {
+ // we are an orphan
+ LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
+ pdir.tail[0], pdir.tail[1]);
+
+ // steal state
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
+ if (err) {
+ return err;
+ }
+
+ // steal tail
+ lfs_pair_tole32(dir.tail);
+ int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
+ dir.tail}));
+ lfs_pair_fromle32(dir.tail);
+ if (state < 0) {
+ return state;
+ }
+
+ found += 1;
+
+ // did our commit create more orphans?
+ if (state == LFS_OK_ORPHANED) {
+ goto restart;
+ }
+
+ // refetch tail
+ continue;
+ }
+
+ if (tag != LFS_ERR_NOENT) {
+ lfs_block_t pair[2];
+ lfs_stag_t state = lfs_dir_get(lfs, &parent,
+ LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
+ if (state < 0) {
+ return state;
+ }
+ lfs_pair_fromle32(pair);
+
+ if (!lfs_pair_sync(pair, pdir.tail)) {
+ // we have desynced
+ LFS_DEBUG("Fixing half-orphan "
+ "{0x%"PRIx32", 0x%"PRIx32"} "
+ "-> {0x%"PRIx32", 0x%"PRIx32"}",
+ pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
+
+ // fix pending move in this pair? this looks like an
+ // optimization but is in fact _required_ since
+ // relocating may outdate the move.
+ uint16_t moveid = 0x3ff;
+ if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+ moveid = lfs_tag_id(lfs->gstate.tag);
+ LFS_DEBUG("Fixing move while fixing orphans "
+ "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+ pdir.pair[0], pdir.pair[1], moveid);
+ lfs_fs_prepmove(lfs, 0x3ff, NULL);
+ }
+
+ lfs_pair_tole32(pair);
+ state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
+ {LFS_MKTAG_IF(moveid != 0x3ff,
+ LFS_TYPE_DELETE, moveid, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
+ pair}));
+ lfs_pair_fromle32(pair);
+ if (state < 0) {
+ return state;
+ }
+
+ found += 1;
+
+ // did our commit create more orphans?
+ if (state == LFS_OK_ORPHANED) {
+ goto restart;
+ }
+
+ // refetch tail
+ continue;
+ }
+ }
+ }
+
+ pdir = dir;
+ }
+ }
+
+ // mark orphans as fixed
+ return lfs_fs_preporphans(lfs, -lfs_min(
+ lfs_gstate_getorphans(&lfs->gstate),
+ found));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_forceconsistency(lfs_t *lfs) {
+ int err = lfs_fs_demove(lfs);
+ if (err) {
+ return err;
+ }
+
+ err = lfs_fs_deorphan(lfs, true);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static int lfs_fs_size_count(void *p, lfs_block_t block) {
+ (void)block;
+ lfs_size_t *size = p;
+ *size += 1;
+ return 0;
+}
+
+static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
+ lfs_size_t size = 0;
+ int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
+ if (err) {
+ return err;
+ }
+
+ return size;
+}
+
+#ifdef LFS_MIGRATE
+////// Migration from littlefs v1 below this //////
+
+/// Version info ///
+
+// Software library version
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS1_VERSION 0x00010007
+#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
+#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
+
+// Version of On-disk data structures
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS1_DISK_VERSION 0x00010001
+#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
+#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
+
+
+/// v1 Definitions ///
+
+// File types
+enum lfs1_type {
+ LFS1_TYPE_REG = 0x11,
+ LFS1_TYPE_DIR = 0x22,
+ LFS1_TYPE_SUPERBLOCK = 0x2e,
+};
+
+typedef struct lfs1 {
+ lfs_block_t root[2];
+} lfs1_t;
+
+typedef struct lfs1_entry {
+ lfs_off_t off;
+
+ struct lfs1_disk_entry {
+ uint8_t type;
+ uint8_t elen;
+ uint8_t alen;
+ uint8_t nlen;
+ union {
+ struct {
+ lfs_block_t head;
+ lfs_size_t size;
+ } file;
+ lfs_block_t dir[2];
+ } u;
+ } d;
+} lfs1_entry_t;
+
+typedef struct lfs1_dir {
+ struct lfs1_dir *next;
+ lfs_block_t pair[2];
+ lfs_off_t off;
+
+ lfs_block_t head[2];
+ lfs_off_t pos;
+
+ struct lfs1_disk_dir {
+ uint32_t rev;
+ lfs_size_t size;
+ lfs_block_t tail[2];
+ } d;
+} lfs1_dir_t;
+
+typedef struct lfs1_superblock {
+ lfs_off_t off;
+
+ struct lfs1_disk_superblock {
+ uint8_t type;
+ uint8_t elen;
+ uint8_t alen;
+ uint8_t nlen;
+ lfs_block_t root[2];
+ uint32_t block_size;
+ uint32_t block_count;
+ uint32_t version;
+ char magic[8];
+ } d;
+} lfs1_superblock_t;
+
+
+/// Low-level wrappers v1->v2 ///
+static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
+ *crc = lfs_crc(*crc, buffer, size);
+}
+
+static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size) {
+ // if we ever do more than writes to alternating pairs,
+ // this may need to consider pcache
+ return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
+ block, off, buffer, size);
+}
+
+static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
+ lfs_off_t off, lfs_size_t size, uint32_t *crc) {
+ for (lfs_off_t i = 0; i < size; i++) {
+ uint8_t c;
+ int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
+ if (err) {
+ return err;
+ }
+
+ lfs1_crc(crc, &c, 1);
+ }
+
+ return 0;
+}
+
+
+/// Endian swapping functions ///
+static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
+ d->rev = lfs_fromle32(d->rev);
+ d->size = lfs_fromle32(d->size);
+ d->tail[0] = lfs_fromle32(d->tail[0]);
+ d->tail[1] = lfs_fromle32(d->tail[1]);
+}
+
+static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
+ d->rev = lfs_tole32(d->rev);
+ d->size = lfs_tole32(d->size);
+ d->tail[0] = lfs_tole32(d->tail[0]);
+ d->tail[1] = lfs_tole32(d->tail[1]);
+}
+
+static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
+ d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
+ d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
+}
+
+static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
+ d->u.dir[0] = lfs_tole32(d->u.dir[0]);
+ d->u.dir[1] = lfs_tole32(d->u.dir[1]);
+}
+
+static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
+ d->root[0] = lfs_fromle32(d->root[0]);
+ d->root[1] = lfs_fromle32(d->root[1]);
+ d->block_size = lfs_fromle32(d->block_size);
+ d->block_count = lfs_fromle32(d->block_count);
+ d->version = lfs_fromle32(d->version);
+}
+
+
+/// Metadata pair and directory operations ///
+static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
+ return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
+}
+
+static int lfs1_dir_fetch(lfs_t *lfs,
+ lfs1_dir_t *dir, const lfs_block_t pair[2]) {
+ // copy out pair, otherwise may be aliasing dir
+ const lfs_block_t tpair[2] = {pair[0], pair[1]};
+ bool valid = false;
+
+ // check both blocks for the most recent revision
+ for (int i = 0; i < 2; i++) {
+ struct lfs1_disk_dir test;
+ int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
+ lfs1_dir_fromle32(&test);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ continue;
+ }
+ return err;
+ }
+
+ if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
+ continue;
+ }
+
+ if ((0x7fffffff & test.size) < sizeof(test)+4 ||
+ (0x7fffffff & test.size) > lfs->cfg->block_size) {
+ continue;
+ }
+
+ uint32_t crc = 0xffffffff;
+ lfs1_dir_tole32(&test);
+ lfs1_crc(&crc, &test, sizeof(test));
+ lfs1_dir_fromle32(&test);
+ err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
+ (0x7fffffff & test.size) - sizeof(test), &crc);
+ if (err) {
+ if (err == LFS_ERR_CORRUPT) {
+ continue;
+ }
+ return err;
+ }
+
+ if (crc != 0) {
+ continue;
+ }
+
+ valid = true;
+
+ // setup dir in case it's valid
+ dir->pair[0] = tpair[(i+0) % 2];
+ dir->pair[1] = tpair[(i+1) % 2];
+ dir->off = sizeof(dir->d);
+ dir->d = test;
+ }
+
+ if (!valid) {
+ LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
+ tpair[0], tpair[1]);
+ return LFS_ERR_CORRUPT;
+ }
+
+ return 0;
+}
+
+static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
+ while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
+ if (!(0x80000000 & dir->d.size)) {
+ entry->off = dir->off;
+ return LFS_ERR_NOENT;
+ }
+
+ int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
+ if (err) {
+ return err;
+ }
+
+ dir->off = sizeof(dir->d);
+ dir->pos += sizeof(dir->d) + 4;
+ }
+
+ int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
+ &entry->d, sizeof(entry->d));
+ lfs1_entry_fromle32(&entry->d);
+ if (err) {
+ return err;
+ }
+
+ entry->off = dir->off;
+ dir->off += lfs1_entry_size(entry);
+ dir->pos += lfs1_entry_size(entry);
+ return 0;
+}
+
+/// littlefs v1 specific operations ///
+int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
+ if (lfs_pair_isnull(lfs->lfs1->root)) {
+ return 0;
+ }
+
+ // iterate over metadata pairs
+ lfs1_dir_t dir;
+ lfs1_entry_t entry;
+ lfs_block_t cwd[2] = {0, 1};
+
+ while (true) {
+ for (int i = 0; i < 2; i++) {
+ int err = cb(data, cwd[i]);
+ if (err) {
+ return err;
+ }
+ }
+
+ int err = lfs1_dir_fetch(lfs, &dir, cwd);
+ if (err) {
+ return err;
+ }
+
+ // iterate over contents
+ while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
+ err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
+ &entry.d, sizeof(entry.d));
+ lfs1_entry_fromle32(&entry.d);
+ if (err) {
+ return err;
+ }
+
+ dir.off += lfs1_entry_size(&entry);
+ if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
+ err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
+ entry.d.u.file.head, entry.d.u.file.size, cb, data);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ // we also need to check if we contain a threaded v2 directory
+ lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
+ while (dir2.split) {
+ err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
+ if (err) {
+ break;
+ }
+
+ for (int i = 0; i < 2; i++) {
+ err = cb(data, dir2.pair[i]);
+ if (err) {
+ return err;
+ }
+ }
+ }
+
+ cwd[0] = dir.d.tail[0];
+ cwd[1] = dir.d.tail[1];
+
+ if (lfs_pair_isnull(cwd)) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int lfs1_moved(lfs_t *lfs, const void *e) {
+ if (lfs_pair_isnull(lfs->lfs1->root)) {
+ return 0;
+ }
+
+ // skip superblock
+ lfs1_dir_t cwd;
+ int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ return err;
+ }
+
+    // iterate over all directory entries
+ lfs1_entry_t entry;
+ while (!lfs_pair_isnull(cwd.d.tail)) {
+ err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
+ if (err) {
+ return err;
+ }
+
+ while (true) {
+ err = lfs1_dir_next(lfs, &cwd, &entry);
+ if (err && err != LFS_ERR_NOENT) {
+ return err;
+ }
+
+ if (err == LFS_ERR_NOENT) {
+ break;
+ }
+
+ if (!(0x80 & entry.d.type) &&
+ memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// Filesystem operations ///
+static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
+ const struct lfs_config *cfg) {
+ int err = 0;
+ {
+ err = lfs_init(lfs, cfg);
+ if (err) {
+ return err;
+ }
+
+ lfs->lfs1 = lfs1;
+ lfs->lfs1->root[0] = LFS_BLOCK_NULL;
+ lfs->lfs1->root[1] = LFS_BLOCK_NULL;
+
+ // setup free lookahead
+ lfs->free.off = 0;
+ lfs->free.size = 0;
+ lfs->free.i = 0;
+ lfs_alloc_ack(lfs);
+
+ // load superblock
+ lfs1_dir_t dir;
+ lfs1_superblock_t superblock;
+ err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
+ if (err && err != LFS_ERR_CORRUPT) {
+ goto cleanup;
+ }
+
+ if (!err) {
+ err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
+ &superblock.d, sizeof(superblock.d));
+ lfs1_superblock_fromle32(&superblock.d);
+ if (err) {
+ goto cleanup;
+ }
+
+ lfs->lfs1->root[0] = superblock.d.root[0];
+ lfs->lfs1->root[1] = superblock.d.root[1];
+ }
+
+ if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
+ LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
+ 0, 1);
+ err = LFS_ERR_CORRUPT;
+ goto cleanup;
+ }
+
+ uint16_t major_version = (0xffff & (superblock.d.version >> 16));
+ uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
+ if ((major_version != LFS1_DISK_VERSION_MAJOR ||
+ minor_version > LFS1_DISK_VERSION_MINOR)) {
+ LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
+ err = LFS_ERR_INVAL;
+ goto cleanup;
+ }
+
+ return 0;
+ }
+
+cleanup:
+ lfs_deinit(lfs);
+ return err;
+}
+
+static int lfs1_unmount(lfs_t *lfs) {
+ return lfs_deinit(lfs);
+}
+
+/// v1 migration ///
+static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
+ struct lfs1 lfs1;
+ int err = lfs1_mount(lfs, &lfs1, cfg);
+ if (err) {
+ return err;
+ }
+
+ {
+ // iterate through each directory, copying over entries
+ // into new directory
+ lfs1_dir_t dir1;
+ lfs_mdir_t dir2;
+ dir1.d.tail[0] = lfs->lfs1->root[0];
+ dir1.d.tail[1] = lfs->lfs1->root[1];
+ while (!lfs_pair_isnull(dir1.d.tail)) {
+ // iterate old dir
+ err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
+ if (err) {
+ goto cleanup;
+ }
+
+ // create new dir and bind as temporary pretend root
+ err = lfs_dir_alloc(lfs, &dir2);
+ if (err) {
+ goto cleanup;
+ }
+
+ dir2.rev = dir1.d.rev;
+ dir1.head[0] = dir1.pair[0];
+ dir1.head[1] = dir1.pair[1];
+ lfs->root[0] = dir2.pair[0];
+ lfs->root[1] = dir2.pair[1];
+
+ err = lfs_dir_commit(lfs, &dir2, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+
+ while (true) {
+ lfs1_entry_t entry1;
+ err = lfs1_dir_next(lfs, &dir1, &entry1);
+ if (err && err != LFS_ERR_NOENT) {
+ goto cleanup;
+ }
+
+ if (err == LFS_ERR_NOENT) {
+ break;
+ }
+
+ // check that entry has not been moved
+ if (entry1.d.type & 0x80) {
+ int moved = lfs1_moved(lfs, &entry1.d.u);
+ if (moved < 0) {
+ err = moved;
+ goto cleanup;
+ }
+
+ if (moved) {
+ continue;
+ }
+
+ entry1.d.type &= ~0x80;
+ }
+
+ // also fetch name
+ char name[LFS_NAME_MAX+1];
+ memset(name, 0, sizeof(name));
+ err = lfs1_bd_read(lfs, dir1.pair[0],
+ entry1.off + 4+entry1.d.elen+entry1.d.alen,
+ name, entry1.d.nlen);
+ if (err) {
+ goto cleanup;
+ }
+
+ bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
+
+ // create entry in new dir
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ uint16_t id;
+ err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
+ if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
+ err = (err < 0) ? err : LFS_ERR_EXIST;
+ goto cleanup;
+ }
+
+ lfs1_entry_tole32(&entry1.d);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
+ {LFS_MKTAG_IF_ELSE(isdir,
+ LFS_TYPE_DIR, id, entry1.d.nlen,
+ LFS_TYPE_REG, id, entry1.d.nlen),
+ name},
+ {LFS_MKTAG_IF_ELSE(isdir,
+ LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
+ LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
+ &entry1.d.u}));
+ lfs1_entry_fromle32(&entry1.d);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ if (!lfs_pair_isnull(dir1.d.tail)) {
+ // find last block and update tail to thread into fs
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ while (dir2.split) {
+ err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ lfs_pair_tole32(dir2.pair);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
+ lfs_pair_fromle32(dir2.pair);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // Copy over first block to thread into fs. Unfortunately
+ // if this fails there is not much we can do.
+ LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
+ "-> {0x%"PRIx32", 0x%"PRIx32"}",
+ lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
+
+ err = lfs_bd_erase(lfs, dir1.head[1]);
+ if (err) {
+ goto cleanup;
+ }
+
+ err = lfs_dir_fetch(lfs, &dir2, lfs->root);
+ if (err) {
+ goto cleanup;
+ }
+
+ for (lfs_off_t i = 0; i < dir2.off; i++) {
+ uint8_t dat;
+ err = lfs_bd_read(lfs,
+ NULL, &lfs->rcache, dir2.off,
+ dir2.pair[0], i, &dat, 1);
+ if (err) {
+ goto cleanup;
+ }
+
+ err = lfs_bd_prog(lfs,
+ &lfs->pcache, &lfs->rcache, true,
+ dir1.head[1], i, &dat, 1);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+ // Create new superblock. This marks a successful migration!
+ err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+
+ dir2.pair[0] = dir1.pair[0];
+ dir2.pair[1] = dir1.pair[1];
+ dir2.rev = dir1.d.rev;
+ dir2.off = sizeof(dir2.rev);
+ dir2.etag = 0xffffffff;
+ dir2.count = 0;
+ dir2.tail[0] = lfs->lfs1->root[0];
+ dir2.tail[1] = lfs->lfs1->root[1];
+ dir2.erased = false;
+ dir2.split = true;
+
+ lfs_superblock_t superblock = {
+ .version = LFS_DISK_VERSION,
+ .block_size = lfs->cfg->block_size,
+ .block_count = lfs->cfg->block_count,
+ .name_max = lfs->name_max,
+ .file_max = lfs->file_max,
+ .attr_max = lfs->attr_max,
+ };
+
+ lfs_superblock_tole32(&superblock);
+ err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
+ {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
+ {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+ &superblock}));
+ if (err) {
+ goto cleanup;
+ }
+
+ // sanity check that fetch works
+ err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
+ if (err) {
+ goto cleanup;
+ }
+
+ // force compaction to prevent accidentally mounting v1
+ dir2.erased = false;
+ err = lfs_dir_commit(lfs, &dir2, NULL, 0);
+ if (err) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ lfs1_unmount(lfs);
+ return err;
+}
+
+#endif
+
+
+/// Public API wrappers ///
+
+// Here we can add tracing/thread safety easily
+
+// Thread-safe wrappers if enabled
+#ifdef LFS_THREADSAFE
+#define LFS_LOCK(cfg) cfg->lock(cfg)
+#define LFS_UNLOCK(cfg) cfg->unlock(cfg)
+#else
+#define LFS_LOCK(cfg) ((void)cfg, 0)
+#define LFS_UNLOCK(cfg) ((void)cfg)
+#endif
+
+// Public API
+#ifndef LFS_READONLY
+int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_format(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_rawformat(lfs, cfg);
+
+ LFS_TRACE("lfs_format -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+#endif
+
+int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_rawmount(lfs, cfg);
+
+ LFS_TRACE("lfs_mount -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+
+int lfs_unmount(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
+
+ err = lfs_rawunmount(lfs);
+
+ LFS_TRACE("lfs_unmount -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifndef LFS_READONLY
+int lfs_remove(lfs_t *lfs, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
+
+ err = lfs_rawremove(lfs, path);
+
+ LFS_TRACE("lfs_remove -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
+
+ err = lfs_rawrename(lfs, oldpath, newpath);
+
+ LFS_TRACE("lfs_rename -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
+
+ err = lfs_rawstat(lfs, path, info);
+
+ LFS_TRACE("lfs_stat -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
+ (void*)lfs, path, type, buffer, size);
+
+ lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
+
+ LFS_TRACE("lfs_getattr -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_setattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
+ (void*)lfs, path, type, buffer, size);
+
+ err = lfs_rawsetattr(lfs, path, type, buffer, size);
+
+ LFS_TRACE("lfs_setattr -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_READONLY
+int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
+
+ err = lfs_rawremoveattr(lfs, path, type);
+
+ LFS_TRACE("lfs_removeattr -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+#ifndef LFS_NO_MALLOC
+int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
+ (void*)lfs, (void*)file, path, flags);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_rawopen(lfs, file, path, flags);
+
+ LFS_TRACE("lfs_file_open -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags,
+ const struct lfs_file_config *cfg) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
+ ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
+ (void*)lfs, (void*)file, path, flags,
+ (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
+
+ LFS_TRACE("lfs_file_opencfg -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_rawclose(lfs, file);
+
+ LFS_TRACE("lfs_file_close -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifndef LFS_READONLY
+int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_rawsync(lfs, file);
+
+ LFS_TRACE("lfs_file_sync -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, buffer, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
+
+ LFS_TRACE("lfs_file_read -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, buffer, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
+
+ LFS_TRACE("lfs_file_write -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+#endif
+
+lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
+ (void*)lfs, (void*)file, off, whence);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
+
+ LFS_TRACE("lfs_file_seek -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
+ (void*)lfs, (void*)file, size);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ err = lfs_file_rawtruncate(lfs, file, size);
+
+ LFS_TRACE("lfs_file_truncate -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_rawtell(lfs, file);
+
+ LFS_TRACE("lfs_file_tell -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
+
+ err = lfs_file_rawrewind(lfs, file);
+
+ LFS_TRACE("lfs_file_rewind -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
+ LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
+
+ lfs_soff_t res = lfs_file_rawsize(lfs, file);
+
+ LFS_TRACE("lfs_file_size -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+#ifndef LFS_READONLY
+int lfs_mkdir(lfs_t *lfs, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
+
+ err = lfs_rawmkdir(lfs, path);
+
+ LFS_TRACE("lfs_mkdir -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+#endif
+
+int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
+ LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
+
+ err = lfs_dir_rawopen(lfs, dir, path);
+
+ LFS_TRACE("lfs_dir_open -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
+
+ err = lfs_dir_rawclose(lfs, dir);
+
+ LFS_TRACE("lfs_dir_close -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_read(%p, %p, %p)",
+ (void*)lfs, (void*)dir, (void*)info);
+
+ err = lfs_dir_rawread(lfs, dir, info);
+
+ LFS_TRACE("lfs_dir_read -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
+ (void*)lfs, (void*)dir, off);
+
+ err = lfs_dir_rawseek(lfs, dir, off);
+
+ LFS_TRACE("lfs_dir_seek -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
+
+ lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
+
+ LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
+
+ err = lfs_dir_rawrewind(lfs, dir);
+
+ LFS_TRACE("lfs_dir_rewind -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
+
+ lfs_ssize_t res = lfs_fs_rawsize(lfs);
+
+ LFS_TRACE("lfs_fs_size -> %"PRId32, res);
+ LFS_UNLOCK(lfs->cfg);
+ return res;
+}
+
+int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
+ int err = LFS_LOCK(lfs->cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
+ (void*)lfs, (void*)(uintptr_t)cb, data);
+
+ err = lfs_fs_rawtraverse(lfs, cb, data, true);
+
+ LFS_TRACE("lfs_fs_traverse -> %d", err);
+ LFS_UNLOCK(lfs->cfg);
+ return err;
+}
+
+#ifdef LFS_MIGRATE
+int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
+ int err = LFS_LOCK(cfg);
+ if (err) {
+ return err;
+ }
+ LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
+ ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
+ ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
+ ".block_size=%"PRIu32", .block_count=%"PRIu32", "
+ ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
+ ".lookahead_size=%"PRIu32", .read_buffer=%p, "
+ ".prog_buffer=%p, .lookahead_buffer=%p, "
+ ".name_max=%"PRIu32", .file_max=%"PRIu32", "
+ ".attr_max=%"PRIu32"})",
+ (void*)lfs, (void*)cfg, cfg->context,
+ (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
+ (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
+ cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
+ cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
+ cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
+ cfg->name_max, cfg->file_max, cfg->attr_max);
+
+ err = lfs_rawmigrate(lfs, cfg);
+
+ LFS_TRACE("lfs_migrate -> %d", err);
+ LFS_UNLOCK(cfg);
+ return err;
+}
+#endif
+
diff --git a/components/fs/littlefs/littlefs/lfs.h b/components/fs/littlefs/littlefs/lfs.h
new file mode 100644
index 00000000..2bce17f5
--- /dev/null
+++ b/components/fs/littlefs/littlefs/lfs.h
@@ -0,0 +1,701 @@
+/*
+ * The little filesystem
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_H
+#define LFS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "lfs_util.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/// Version info ///
+
+// Software library version
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS_VERSION 0x00020005
+#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
+#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
+
+// Version of On-disk data structures
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS_DISK_VERSION 0x00020000
+#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
+#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
+
+
+/// Definitions ///
+
+// Type definitions
+typedef uint32_t lfs_size_t;
+typedef uint32_t lfs_off_t;
+
+typedef int32_t lfs_ssize_t;
+typedef int32_t lfs_soff_t;
+
+typedef uint32_t lfs_block_t;
+
+// Maximum name size in bytes, may be redefined to reduce the size of the
+// info struct. Limited to <= 1022. Stored in superblock and must be
+// respected by other littlefs drivers.
+#ifndef LFS_NAME_MAX
+#define LFS_NAME_MAX 255
+#endif
+
+// Maximum size of a file in bytes, may be redefined to limit to support other
+// drivers. Limited on disk to <= 4294967296. However, above 2147483647 the
+// functions lfs_file_seek, lfs_file_size, and lfs_file_tell will return
+// incorrect values due to using signed integers. Stored in superblock and
+// must be respected by other littlefs drivers.
+#ifndef LFS_FILE_MAX
+#define LFS_FILE_MAX 2147483647
+#endif
+
+// Maximum size of custom attributes in bytes, may be redefined, but there is
+// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022.
+#ifndef LFS_ATTR_MAX
+#define LFS_ATTR_MAX 1022
+#endif
+
+// Possible error codes, these are negative to allow
+// valid positive return values
+//
+// NOTE(review): the values appear to mirror negated POSIX errno numbers
+// (e.g. -2/ENOENT, -5/EIO, -28/ENOSPC), which is convenient when mapping
+// littlefs results to errno — confirm before relying on this.
+enum lfs_error {
+ LFS_ERR_OK = 0, // No error
+ LFS_ERR_IO = -5, // Error during device operation
+ LFS_ERR_CORRUPT = -84, // Corrupted
+ LFS_ERR_NOENT = -2, // No directory entry
+ LFS_ERR_EXIST = -17, // Entry already exists
+ LFS_ERR_NOTDIR = -20, // Entry is not a dir
+ LFS_ERR_ISDIR = -21, // Entry is a dir
+ LFS_ERR_NOTEMPTY = -39, // Dir is not empty
+ LFS_ERR_BADF = -9, // Bad file number
+ LFS_ERR_FBIG = -27, // File too large
+ LFS_ERR_INVAL = -22, // Invalid parameter
+ LFS_ERR_NOSPC = -28, // No space left on device
+ LFS_ERR_NOMEM = -12, // No more memory available
+ LFS_ERR_NOATTR = -61, // No data/attr available
+ LFS_ERR_NAMETOOLONG = -36, // File name too long
+};
+
+// File types
+enum lfs_type {
+ // file types
+ LFS_TYPE_REG = 0x001,
+ LFS_TYPE_DIR = 0x002,
+
+ // internally used types
+ LFS_TYPE_SPLICE = 0x400,
+ LFS_TYPE_NAME = 0x000,
+ LFS_TYPE_STRUCT = 0x200,
+ LFS_TYPE_USERATTR = 0x300,
+ LFS_TYPE_FROM = 0x100,
+ LFS_TYPE_TAIL = 0x600,
+ LFS_TYPE_GLOBALS = 0x700,
+ LFS_TYPE_CRC = 0x500,
+
+ // internally used type specializations
+ LFS_TYPE_CREATE = 0x401,
+ LFS_TYPE_DELETE = 0x4ff,
+ LFS_TYPE_SUPERBLOCK = 0x0ff,
+ LFS_TYPE_DIRSTRUCT = 0x200,
+ LFS_TYPE_CTZSTRUCT = 0x202,
+ LFS_TYPE_INLINESTRUCT = 0x201,
+ LFS_TYPE_SOFTTAIL = 0x600,
+ LFS_TYPE_HARDTAIL = 0x601,
+ LFS_TYPE_MOVESTATE = 0x7ff,
+
+ // internal chip sources
+ LFS_FROM_NOOP = 0x000,
+ LFS_FROM_MOVE = 0x101,
+ LFS_FROM_USERATTRS = 0x102,
+};
+
+// File open flags
+enum lfs_open_flags {
+ // open flags
+ LFS_O_RDONLY = 1, // Open a file as read only
+#ifndef LFS_READONLY
+ LFS_O_WRONLY = 2, // Open a file as write only
+ LFS_O_RDWR = 3, // Open a file as read and write
+ LFS_O_CREAT = 0x0100, // Create a file if it does not exist
+ LFS_O_EXCL = 0x0200, // Fail if a file already exists
+ LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
+ LFS_O_APPEND = 0x0800, // Move to end of file on every write
+#endif
+
+ // internally used flags
+#ifndef LFS_READONLY
+ LFS_F_DIRTY = 0x010000, // File does not match storage
+ LFS_F_WRITING = 0x020000, // File has been written since last flush
+#endif
+ LFS_F_READING = 0x040000, // File has been read since last flush
+#ifndef LFS_READONLY
+ LFS_F_ERRED = 0x080000, // An error occurred during write
+#endif
+ LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
+};
+
+// File seek flags
+enum lfs_whence_flags {
+ LFS_SEEK_SET = 0, // Seek relative to an absolute position
+ LFS_SEEK_CUR = 1, // Seek relative to the current file position
+ LFS_SEEK_END = 2, // Seek relative to the end of the file
+};
+
+
+// Configuration provided during initialization of the littlefs
+struct lfs_config {
+ // Opaque user provided context that can be used to pass
+ // information to the block device operations
+ void *context;
+
+ // Read a region in a block. Negative error codes are propagated
+ // to the user.
+ int (*read)(const struct lfs_config *c, lfs_block_t block,
+ lfs_off_t off, void *buffer, lfs_size_t size);
+
+ // Program a region in a block. The block must have previously
+ // been erased. Negative error codes are propagated to the user.
+ // May return LFS_ERR_CORRUPT if the block should be considered bad.
+ int (*prog)(const struct lfs_config *c, lfs_block_t block,
+ lfs_off_t off, const void *buffer, lfs_size_t size);
+
+ // Erase a block. A block must be erased before being programmed.
+ // The state of an erased block is undefined. Negative error codes
+ // are propagated to the user.
+ // May return LFS_ERR_CORRUPT if the block should be considered bad.
+ int (*erase)(const struct lfs_config *c, lfs_block_t block);
+
+ // Sync the state of the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*sync)(const struct lfs_config *c);
+
+#ifdef LFS_THREADSAFE
+ // Lock the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*lock)(const struct lfs_config *c);
+
+ // Unlock the underlying block device. Negative error codes
+ // are propagated to the user.
+ int (*unlock)(const struct lfs_config *c);
+#endif
+
+ // Minimum size of a block read in bytes. All read operations will be a
+ // multiple of this value.
+ lfs_size_t read_size;
+
+ // Minimum size of a block program in bytes. All program operations will be
+ // a multiple of this value.
+ lfs_size_t prog_size;
+
+ // Size of an erasable block in bytes. This does not impact ram consumption
+ // and may be larger than the physical erase size. However, non-inlined
+ // files take up at minimum one block. Must be a multiple of the read and
+ // program sizes.
+ lfs_size_t block_size;
+
+ // Number of erasable blocks on the device.
+ lfs_size_t block_count;
+
+ // Number of erase cycles before littlefs evicts metadata logs and moves
+ // the metadata to another block. Suggested values are in the
+ // range 100-1000, with large values having better performance at the cost
+ // of less consistent wear distribution.
+ //
+ // Set to -1 to disable block-level wear-leveling.
+ int32_t block_cycles;
+
+ // Size of block caches in bytes. Each cache buffers a portion of a block in
+ // RAM. The littlefs needs a read cache, a program cache, and one additional
+ // cache per file. Larger caches can improve performance by storing more
+ // data and reducing the number of disk accesses. Must be a multiple of the
+ // read and program sizes, and a factor of the block size.
+ lfs_size_t cache_size;
+
+ // Size of the lookahead buffer in bytes. A larger lookahead buffer
+ // increases the number of blocks found during an allocation pass. The
+ // lookahead buffer is stored as a compact bitmap, so each byte of RAM
+ // can track 8 blocks. Must be a multiple of 8.
+ lfs_size_t lookahead_size;
+
+ // Optional statically allocated read buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *read_buffer;
+
+ // Optional statically allocated program buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *prog_buffer;
+
+ // Optional statically allocated lookahead buffer. Must be lookahead_size
+ // and aligned to a 32-bit boundary. By default lfs_malloc is used to
+ // allocate this buffer.
+ void *lookahead_buffer;
+
+ // Optional upper limit on length of file names in bytes. No downside for
+ // larger names except the size of the info struct which is controlled by
+ // the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX when zero. Stored in
+ // superblock and must be respected by other littlefs drivers.
+ lfs_size_t name_max;
+
+ // Optional upper limit on files in bytes. No downside for larger files
+ // but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX when zero. Stored
+ // in superblock and must be respected by other littlefs drivers.
+ lfs_size_t file_max;
+
+ // Optional upper limit on custom attributes in bytes. No downside for
+ // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
+ // LFS_ATTR_MAX when zero.
+ lfs_size_t attr_max;
+
+ // Optional upper limit on total space given to metadata pairs in bytes. On
+ // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
+ // can help bound the metadata compaction time. Must be <= block_size.
+ // Defaults to block_size when zero.
+ lfs_size_t metadata_max;
+};
+
+// File info structure
+struct lfs_info {
+ // Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR
+ uint8_t type;
+
+ // Size of the file, only valid for REG files. Limited to 32-bits.
+ lfs_size_t size;
+
+ // Name of the file stored as a null-terminated string. Limited to
+ // LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to
+ // reduce RAM. LFS_NAME_MAX is stored in superblock and must be
+ // respected by other littlefs drivers.
+ char name[LFS_NAME_MAX+1];
+};
+
+// Custom attribute structure, used to describe custom attributes
+// committed atomically during file writes.
+struct lfs_attr {
+ // 8-bit type of attribute, provided by user and used to
+ // identify the attribute
+ uint8_t type;
+
+ // Pointer to buffer containing the attribute
+ void *buffer;
+
+ // Size of attribute in bytes, limited to LFS_ATTR_MAX
+ lfs_size_t size;
+};
+
+// Optional configuration provided during lfs_file_opencfg
+struct lfs_file_config {
+ // Optional statically allocated file buffer. Must be cache_size.
+ // By default lfs_malloc is used to allocate this buffer.
+ void *buffer;
+
+ // Optional list of custom attributes related to the file. If the file
+ // is opened with read access, these attributes will be read from disk
+ // during the open call. If the file is opened with write access, the
+ // attributes will be written to disk every file sync or close. This
+ // write occurs atomically with update to the file's contents.
+ //
+ // Custom attributes are uniquely identified by an 8-bit type and limited
+ // to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
+ // than the buffer, it will be padded with zeros. If the stored attribute
+ // is larger, then it will be silently truncated. If the attribute is not
+ // found, it will be created implicitly.
+ struct lfs_attr *attrs;
+
+ // Number of custom attributes in the list
+ lfs_size_t attr_count;
+};
+
+
+/// internal littlefs data structures ///
+typedef struct lfs_cache {
+ lfs_block_t block;
+ lfs_off_t off;
+ lfs_size_t size;
+ uint8_t *buffer;
+} lfs_cache_t;
+
+typedef struct lfs_mdir {
+ lfs_block_t pair[2];
+ uint32_t rev;
+ lfs_off_t off;
+ uint32_t etag;
+ uint16_t count;
+ bool erased;
+ bool split;
+ lfs_block_t tail[2];
+} lfs_mdir_t;
+
+// littlefs directory type
+typedef struct lfs_dir {
+ struct lfs_dir *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+
+ lfs_off_t pos;
+ lfs_block_t head[2];
+} lfs_dir_t;
+
+// littlefs file type
+typedef struct lfs_file {
+ struct lfs_file *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+
+ struct lfs_ctz {
+ lfs_block_t head;
+ lfs_size_t size;
+ } ctz;
+
+ uint32_t flags;
+ lfs_off_t pos;
+ lfs_block_t block;
+ lfs_off_t off;
+ lfs_cache_t cache;
+
+ const struct lfs_file_config *cfg;
+} lfs_file_t;
+
+typedef struct lfs_superblock {
+ uint32_t version;
+ lfs_size_t block_size;
+ lfs_size_t block_count;
+ lfs_size_t name_max;
+ lfs_size_t file_max;
+ lfs_size_t attr_max;
+} lfs_superblock_t;
+
+typedef struct lfs_gstate {
+ uint32_t tag;
+ lfs_block_t pair[2];
+} lfs_gstate_t;
+
+// The littlefs filesystem type
+typedef struct lfs {
+ lfs_cache_t rcache;
+ lfs_cache_t pcache;
+
+ lfs_block_t root[2];
+ struct lfs_mlist {
+ struct lfs_mlist *next;
+ uint16_t id;
+ uint8_t type;
+ lfs_mdir_t m;
+ } *mlist;
+ uint32_t seed;
+
+ lfs_gstate_t gstate;
+ lfs_gstate_t gdisk;
+ lfs_gstate_t gdelta;
+
+ struct lfs_free {
+ lfs_block_t off;
+ lfs_block_t size;
+ lfs_block_t i;
+ lfs_block_t ack;
+ uint32_t *buffer;
+ } free;
+
+ const struct lfs_config *cfg;
+ lfs_size_t name_max;
+ lfs_size_t file_max;
+ lfs_size_t attr_max;
+
+#ifdef LFS_MIGRATE
+ struct lfs1 *lfs1;
+#endif
+} lfs_t;
+
+
+/// Filesystem functions ///
+
+#ifndef LFS_READONLY
+// Format a block device with the littlefs
+//
+// Requires a littlefs object and config struct. This clobbers the littlefs
+// object, and does not leave the filesystem mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_format(lfs_t *lfs, const struct lfs_config *config);
+#endif
+
+// Mounts a littlefs
+//
+// Requires a littlefs object and config struct. Multiple filesystems
+// may be mounted simultaneously with multiple littlefs objects. Both
+// lfs and config must be allocated while mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_mount(lfs_t *lfs, const struct lfs_config *config);
+
+// Unmounts a littlefs
+//
+// Does nothing besides releasing any allocated resources.
+// Returns a negative error code on failure.
+int lfs_unmount(lfs_t *lfs);
+
+/// General operations ///
+
+#ifndef LFS_READONLY
+// Removes a file or directory
+//
+// If removing a directory, the directory must be empty.
+// Returns a negative error code on failure.
+int lfs_remove(lfs_t *lfs, const char *path);
+#endif
+
+#ifndef LFS_READONLY
+// Rename or move a file or directory
+//
+// If the destination exists, it must match the source in type.
+// If the destination is a directory, the directory must be empty.
+//
+// Returns a negative error code on failure.
+int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
+#endif
+
+// Find info about a file or directory
+//
+// Fills out the info structure, based on the specified file or directory.
+// Returns a negative error code on failure.
+int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
+
+// Get a custom attribute
+//
+// Custom attributes are uniquely identified by an 8-bit type and limited
+// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than
+// the buffer, it will be padded with zeros. If the stored attribute is larger,
+// then it will be silently truncated. If no attribute is found, the error
+// LFS_ERR_NOATTR is returned and the buffer is filled with zeros.
+//
+// Returns the size of the attribute, or a negative error code on failure.
+// Note, the returned size is the size of the attribute on disk, irrespective
+// of the size of the buffer. This can be used to dynamically allocate a buffer
+// or check for existence.
+lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
+ uint8_t type, void *buffer, lfs_size_t size);
+
+#ifndef LFS_READONLY
+// Set custom attributes
+//
+// Custom attributes are uniquely identified by an 8-bit type and limited
+// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be
+// implicitly created.
+//
+// Returns a negative error code on failure.
+int lfs_setattr(lfs_t *lfs, const char *path,
+ uint8_t type, const void *buffer, lfs_size_t size);
+#endif
+
+#ifndef LFS_READONLY
+// Removes a custom attribute
+//
+// If an attribute is not found, nothing happens.
+//
+// Returns a negative error code on failure.
+int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
+#endif
+
+
+/// File operations ///
+
+#ifndef LFS_NO_MALLOC
+// Open a file
+//
+// The mode that the file is opened in is determined by the flags, which
+// are values from the enum lfs_open_flags that are bitwise-ored together.
+//
+// Returns a negative error code on failure.
+int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags);
+
+// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM
+// thus use lfs_file_opencfg() with config.buffer set.
+#endif
+
+// Open a file with extra configuration
+//
+// The mode that the file is opened in is determined by the flags, which
+// are values from the enum lfs_open_flags that are bitwise-ored together.
+//
+// The config struct provides additional config options per file as described
+// above. The config struct must remain allocated while the file is open, and
+// the config struct must be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
+ const char *path, int flags,
+ const struct lfs_file_config *config);
+
+// Close a file
+//
+// Any pending writes are written out to storage as though
+// sync had been called and releases any allocated resources.
+//
+// Returns a negative error code on failure.
+int lfs_file_close(lfs_t *lfs, lfs_file_t *file);
+
+// Synchronize a file on storage
+//
+// Any pending writes are written out to storage.
+// Returns a negative error code on failure.
+int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
+
+// Read data from file
+//
+// Takes a buffer and size indicating where to store the read data.
+// Returns the number of bytes read, or a negative error code on failure.
+lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
+ void *buffer, lfs_size_t size);
+
+#ifndef LFS_READONLY
+// Write data to file
+//
+// Takes a buffer and size indicating the data to write. The file will not
+// actually be updated on the storage until either sync or close is called.
+//
+// Returns the number of bytes written, or a negative error code on failure.
+lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
+ const void *buffer, lfs_size_t size);
+#endif
+
+// Change the position of the file
+//
+// The change in position is determined by the offset and whence flag.
+// Returns the new position of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
+ lfs_soff_t off, int whence);
+
+#ifndef LFS_READONLY
+// Truncates the size of the file to the specified size
+//
+// Returns a negative error code on failure.
+int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
+#endif
+
+// Return the position of the file
+//
+// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR)
+// Returns the position of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file);
+
+// Change the position of the file to the beginning of the file
+//
+// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET)
+// Returns a negative error code on failure.
+int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file);
+
+// Return the size of the file
+//
+// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END)
+// Returns the size of the file, or a negative error code on failure.
+lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
+
+
+/// Directory operations ///
+
+#ifndef LFS_READONLY
+// Create a directory
+//
+// Returns a negative error code on failure.
+int lfs_mkdir(lfs_t *lfs, const char *path);
+#endif
+
+// Open a directory
+//
+// Once open a directory can be used with read to iterate over files.
+// Returns a negative error code on failure.
+int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path);
+
+// Close a directory
+//
+// Releases any allocated resources.
+// Returns a negative error code on failure.
+int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir);
+
+// Read an entry in the directory
+//
+// Fills out the info structure, based on the specified file or directory.
+// Returns a positive value on success, 0 at the end of directory,
+// or a negative error code on failure.
+int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info);
+
+// Change the position of the directory
+//
+// The new off must be a value previous returned from tell and specifies
+// an absolute offset in the directory seek.
+//
+// Returns a negative error code on failure.
+int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off);
+
+// Return the position of the directory
+//
+// The returned offset is only meant to be consumed by seek and may not make
+// sense, but does indicate the current position in the directory iteration.
+//
+// Returns the position of the directory, or a negative error code on failure.
+lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir);
+
+// Change the position of the directory to the beginning of the directory
+//
+// Returns a negative error code on failure.
+int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir);
+
+
+/// Filesystem-level filesystem operations
+
+// Finds the current size of the filesystem
+//
+// Note: Result is best effort. If files share COW structures, the returned
+// size may be larger than the filesystem actually is.
+//
+// Returns the number of allocated blocks, or a negative error code on failure.
+lfs_ssize_t lfs_fs_size(lfs_t *lfs);
+
+// Traverse through all blocks in use by the filesystem
+//
+// The provided callback will be called with each block address that is
+// currently in use by the filesystem. This can be used to determine which
+// blocks are in use or how much of the storage is available.
+//
+// Returns a negative error code on failure.
+int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
+
+#ifndef LFS_READONLY
+#ifdef LFS_MIGRATE
+// Attempts to migrate a previous version of littlefs
+//
+// Behaves similarly to the lfs_format function. Attempts to mount
+// the previous version of littlefs and update the filesystem so it can be
+// mounted with the current version of littlefs.
+//
+// Requires a littlefs object and config struct. This clobbers the littlefs
+// object, and does not leave the filesystem mounted. The config struct must
+// be zeroed for defaults and backwards compatibility.
+//
+// Returns a negative error code on failure.
+int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
+#endif
+#endif
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/components/fs/littlefs/littlefs/lfs_util.c b/components/fs/littlefs/littlefs/lfs_util.c
new file mode 100644
index 00000000..9cdd1c60
--- /dev/null
+++ b/components/fs/littlefs/littlefs/lfs_util.c
@@ -0,0 +1,34 @@
+/*
+ * lfs util functions
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "lfs_util.h"
+
+// Only compile if user does not provide custom config
+#ifndef LFS_CONFIG
+
+
+// Software CRC-32 implementation with a small (16-entry) lookup table.
+//
+// Folds 'size' bytes of 'buffer' into the running CRC 'crc' and returns the
+// updated value, so it can be called incrementally over multiple buffers.
+// The header declares the polynomial as 0x04c11db7; the right-shifting loop
+// below is the reflected form of that polynomial. Processing one nibble
+// (4 bits) at a time, low nibble first, keeps the table at 16 entries
+// (64 bytes) at the cost of two lookups per byte.
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+ // CRCs of the 16 possible 4-bit messages (reflected polynomial)
+ static const uint32_t rtable[16] = {
+ 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+ 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
+ };
+
+ const uint8_t *data = buffer;
+
+ for (size_t i = 0; i < size; i++) {
+ // low nibble, then high nibble of each byte
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
+ crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
+ }
+
+ return crc;
+}
+
+
+#endif
diff --git a/components/fs/littlefs/littlefs/lfs_util.h b/components/fs/littlefs/littlefs/lfs_util.h
new file mode 100644
index 00000000..2fd7ad45
--- /dev/null
+++ b/components/fs/littlefs/littlefs/lfs_util.h
@@ -0,0 +1,243 @@
+/*
+ * lfs utility functions
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_UTIL_H
+#define LFS_UTIL_H
+
+// Users can override lfs_util.h with their own configuration by defining
+// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
+//
+// If LFS_CONFIG is used, none of the default utils will be emitted and must be
+// provided by the config file. To start, I would suggest copying lfs_util.h
+// and modifying as needed.
+#ifdef LFS_CONFIG
+#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
+#define LFS_STRINGIZE2(x) #x
+#include LFS_STRINGIZE(LFS_CONFIG)
+#else
+
+// System includes
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <string.h>
+
+#ifndef LFS_NO_MALLOC
+#include <stdlib.h>
+#endif
+#ifndef LFS_NO_ASSERT
+#include <assert.h>
+#endif
+#if !defined(LFS_NO_DEBUG) || \
+ !defined(LFS_NO_WARN) || \
+ !defined(LFS_NO_ERROR) || \
+ defined(LFS_YES_TRACE)
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Macros, may be replaced by system specific wrappers. Arguments to these
+// macros must not have side-effects as the macros can be removed for a smaller
+// code footprint
+
+// Logging functions
+#ifndef LFS_TRACE
+#ifdef LFS_YES_TRACE
+#define LFS_TRACE_(fmt, ...) \
+ printf("%s:%d:trace: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#else
+#define LFS_TRACE(...)
+#endif
+#endif
+
+#ifndef LFS_DEBUG
+#ifndef LFS_NO_DEBUG
+#define LFS_DEBUG_(fmt, ...) \
+ printf("%s:%d:debug: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
+#else
+#define LFS_DEBUG(...)
+#endif
+#endif
+
+#ifndef LFS_WARN
+#ifndef LFS_NO_WARN
+#define LFS_WARN_(fmt, ...) \
+ printf("%s:%d:warn: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
+#else
+#define LFS_WARN(...)
+#endif
+#endif
+
+#ifndef LFS_ERROR
+#ifndef LFS_NO_ERROR
+#define LFS_ERROR_(fmt, ...) \
+ printf("%s:%d:error: " fmt "%s\r\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
+#else
+#define LFS_ERROR(...)
+#endif
+#endif
+
+// Runtime assertions
+#ifndef LFS_ASSERT
+#ifndef LFS_NO_ASSERT
+#define LFS_ASSERT(test) assert(test)
+#else
+#define LFS_ASSERT(test)
+#endif
+#endif
+
+
+// Builtin functions, these may be replaced by more efficient
+// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
+// expensive basic C implementation for debugging purposes
+
+// Min/max functions for unsigned 32-bit numbers
+static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
+ return (a > b) ? a : b;
+}
+
+static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
+ return (a < b) ? a : b;
+}
+
+// Align to nearest multiple of a size
+static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
+ return a - (a % alignment);
+}
+
+static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
+ return lfs_aligndown(a + alignment-1, alignment);
+}
+
+// Find the smallest power of 2 greater than or equal to a
+//
+// Note the result is returned as an exponent: lfs_npw2(a) == ceil(log2(a)),
+// e.g. lfs_npw2(5) == 3 since 2^3 == 8 >= 5. For a <= 1 the intrinsic path
+// hits __builtin_clz(0), which is undefined — callers presumably pass
+// a >= 2 (confirm at call sites).
+static inline uint32_t lfs_npw2(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return 32 - __builtin_clz(a-1);
+#else
+ // branchless binary search on a-1: each step tests whether any set bits
+ // remain above a boundary (16, 8, 4, 2 bits), shifts them down if so, and
+ // accumulates the shift amount into r; r ends as floor(log2(a-1)) and the
+ // final +1 rounds up to the next power-of-2 exponent
+ uint32_t r = 0;
+ uint32_t s;
+ a -= 1;
+ s = (a > 0xffff) << 4; a >>= s; r |= s;
+ s = (a > 0xff ) << 3; a >>= s; r |= s;
+ s = (a > 0xf ) << 2; a >>= s; r |= s;
+ s = (a > 0x3 ) << 1; a >>= s; r |= s;
+ return (r | (a >> 1)) + 1;
+#endif
+}
+
+// Count the number of trailing binary zeros in a
+// lfs_ctz(0) may be undefined
+static inline uint32_t lfs_ctz(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
+ return __builtin_ctz(a);
+#else
+ return lfs_npw2((a & -a) + 1) - 1;
+#endif
+}
+
+// Count the number of binary ones in a
+static inline uint32_t lfs_popc(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
+ return __builtin_popcount(a);
+#else
+ // classic bit-parallel (SWAR) popcount: sum adjacent bit pairs, then
+ // 2-bit fields into 4-bit fields, then the multiply by 0x1010101 adds all
+ // byte counts into the top byte, which the >> 24 extracts
+ a = a - ((a >> 1) & 0x55555555);
+ a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
+ return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+#endif
+}
+
+// Find the sequence comparison of a and b, this is the distance
+// between a and b ignoring overflow
+static inline int lfs_scmp(uint32_t a, uint32_t b) {
+ return (int)(unsigned)(a - b);
+}
+
+// Convert between 32-bit little-endian and native order
+static inline uint32_t lfs_fromle32(uint32_t a) {
+#if (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ return a;
+#elif !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ return __builtin_bswap32(a);
+#else
+ return (((uint8_t*)&a)[0] << 0) |
+ (((uint8_t*)&a)[1] << 8) |
+ (((uint8_t*)&a)[2] << 16) |
+ (((uint8_t*)&a)[3] << 24);
+#endif
+}
+
+static inline uint32_t lfs_tole32(uint32_t a) {
+ return lfs_fromle32(a);
+}
+
+// Convert between 32-bit big-endian and native order
+static inline uint32_t lfs_frombe32(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && ( \
+ (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+ return __builtin_bswap32(a);
+#elif (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+ (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ return a;
+#else
+ return (((uint8_t*)&a)[0] << 24) |
+ (((uint8_t*)&a)[1] << 16) |
+ (((uint8_t*)&a)[2] << 8) |
+ (((uint8_t*)&a)[3] << 0);
+#endif
+}
+
+static inline uint32_t lfs_tobe32(uint32_t a) {
+ return lfs_frombe32(a);
+}
+
+// Calculate CRC-32 with polynomial = 0x04c11db7
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
+
+// Allocate memory, only used if buffers are not provided to littlefs
+// Note, memory must be 64-bit aligned
+static inline void *lfs_malloc(size_t size) {
+#ifndef LFS_NO_MALLOC
+ return malloc(size);
+#else
+ (void)size;
+ return NULL;
+#endif
+}
+
+// Deallocate memory, only used if buffers are not provided to littlefs
+static inline void lfs_free(void *p) {
+#ifndef LFS_NO_MALLOC
+ free(p);
+#else
+ (void)p;
+#endif
+}
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
+#endif
diff --git a/components/fs/littlefs/littlefs/scripts/code.py b/components/fs/littlefs/littlefs/scripts/code.py
new file mode 100755
index 00000000..b394e9cd
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/code.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+#
+# Script to find code size at the function level. Basically just a bit wrapper
+# around nm with some extra conveniences for comparing builds. Heavily inspired
+# by Linux's Bloat-O-Meter.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o']
+
+def collect(paths, **args):
+ results = co.defaultdict(lambda: 0)
+ pattern = re.compile(
+ '^(?P[0-9a-fA-F]+)' +
+ ' (?P[%s])' % re.escape(args['type']) +
+ ' (?P.+?)$')
+ for path in paths:
+ # note nm-tool may contain extra args
+ cmd = args['nm_tool'] + ['--size-sort', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace')
+ for line in proc.stdout:
+ m = pattern.match(line)
+ if m:
+ results[(path, m.group('func'))] += int(m.group('size'), 16)
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ flat_results = []
+ for (file, func), size in results.items():
+ # map to source files
+ if args.get('build_dir'):
+ file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+ # replace .o with .c, different scripts report .o/.c, we need to
+ # choose one if we want to deduplicate csv files
+ file = re.sub('\.o$', '.c', file)
+ # discard internal functions
+ if not args.get('everything'):
+ if func.startswith('__'):
+ continue
+ # discard .8449 suffixes created by optimizer
+ func = re.sub('\.[0-9]+', '', func)
+
+ flat_results.append((file, func, size))
+
+ return flat_results
+
+def main(**args):
+    """Report code size per function or file.
+
+    Sizes come either from nm over .o files (collect) or from a previous
+    CSV (--use); results can be written to CSV (--output), merged with an
+    existing CSV (--merge), and diffed against a prior CSV (--diff).
+    `args` is the argparse namespace as a dict (see bottom of file).
+    """
+    def openio(path, mode='r'):
+        # '-' means stdin/stdout; dup the fd so closing the returned file
+        # doesn't close the process's real stream
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            # NOTE(review): message says ".obj" while the glob is "*.o" — confirm
+            print('no .obj files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['code_size']))
+                for result in r
+                if result.get('code_size') not in {None, ''}]
+
+    total = 0
+    for _, _, size in results:
+        total += size
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['code_size']))
+                    for result in r
+                    if result.get('code_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total = 0
+        for _, _, size in prev_results:
+            prev_total += size
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('code_size', None)
+                        merged_results[(file, func)] = result
+                        # keys() view of the last row's remaining columns
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, size in results:
+            merged_results[(file, func)]['code_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        # sum sizes grouped by function name or by file
+        entries = co.defaultdict(lambda: 0)
+        for file, func, size in results:
+            entry = (file if by == 'file' else func)
+            entries[entry] += size
+        return entries
+
+    def diff_entries(olds, news):
+        # name -> (old size, new size, absolute diff, relative diff)
+        diff = co.defaultdict(lambda: (0, 0, 0, 0))
+        for name, new in news.items():
+            diff[name] = (0, new, new, 1.0)
+        for name, old in olds.items():
+            _, new, _, _ = diff[name]
+            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            # default for diffs: largest relative change first
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s' % (by, 'size'))
+        else:
+            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for old, _, _, _ in diff.values() if not old),
+                sum(1 for _, new, _, _ in diff.values() if not new)))
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
+                if ratio or args.get('all'):
+                    print_diff_entry(name, old, new, diff, ratio)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
+                total-prev_total,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Find code size at the function level.")
+ parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+ help="Description of where to find *.o files. May be a directory \
+ or a list of paths. Defaults to %r." % OBJ_PATHS)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument('-q', '--quiet', action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument('-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument('-u', '--use',
+ help="Don't compile and find code sizes, instead use this CSV file.")
+ parser.add_argument('-d', '--diff',
+ help="Specify CSV file to diff code size against.")
+ parser.add_argument('-m', '--merge',
+ help="Merge with an existing CSV file when writing to output.")
+ parser.add_argument('-a', '--all', action='store_true',
+ help="Show all functions, not just the ones that changed.")
+ parser.add_argument('-A', '--everything', action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument('-s', '--size-sort', action='store_true',
+ help="Sort by size.")
+ parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+ help="Sort by size, but backwards.")
+ parser.add_argument('-F', '--files', action='store_true',
+ help="Show file-level code sizes. Note this does not include padding! "
+ "So sizes may differ from other tools.")
+ parser.add_argument('-Y', '--summary', action='store_true',
+ help="Only show the total code size.")
+ parser.add_argument('--type', default='tTrRdD',
+ help="Type of symbols to report, this uses the same single-character "
+ "type-names emitted by nm. Defaults to %(default)r.")
+ parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
+ help="Path to the nm tool to use.")
+ parser.add_argument('--build-dir',
+ help="Specify the relative build directory. Used to map object files \
+ to the correct source files.")
+ sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/coverage.py b/components/fs/littlefs/littlefs/scripts/coverage.py
new file mode 100755
index 00000000..b3a90ed2
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/coverage.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python3
+#
+# Parse and report coverage info from .info files generated by lcov
+#
+import os
+import glob
+import csv
+import re
+import collections as co
+import bisect as b
+
+
+INFO_PATHS = ['tests/*.toml.info']
+
+def collect(paths, **args):
+ file = None
+ funcs = []
+ lines = co.defaultdict(lambda: 0)
+ pattern = re.compile(
+ '^(?PSF:/?(?P.*))$'
+ '|^(?PFN:(?P[0-9]*),(?P.*))$'
+ '|^(?PDA:(?P[0-9]*),(?P[0-9]*))$')
+ for path in paths:
+ with open(path) as f:
+ for line in f:
+ m = pattern.match(line)
+ if m and m.group('file'):
+ file = m.group('file_name')
+ elif m and file and m.group('func'):
+ funcs.append((file, int(m.group('func_lineno')),
+ m.group('func_name')))
+ elif m and file and m.group('line'):
+ lines[(file, int(m.group('line_lineno')))] += (
+ int(m.group('line_hits')))
+
+ # map line numbers to functions
+ funcs.sort()
+ def func_from_lineno(file, lineno):
+ i = b.bisect(funcs, (file, lineno))
+ if i and funcs[i-1][0] == file:
+ return funcs[i-1][2]
+ else:
+ return None
+
+ # reduce to function info
+ reduced_funcs = co.defaultdict(lambda: (0, 0))
+ for (file, line_lineno), line_hits in lines.items():
+ func = func_from_lineno(file, line_lineno)
+ if not func:
+ continue
+ hits, count = reduced_funcs[(file, func)]
+ reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
+
+ results = []
+ for (file, func), (hits, count) in reduced_funcs.items():
+ # discard internal/testing functions (test_* injected with
+ # internal testing)
+ if not args.get('everything'):
+ if func.startswith('__') or func.startswith('test_'):
+ continue
+ # discard .8449 suffixes created by optimizer
+ func = re.sub('\.[0-9]+', '', func)
+ results.append((file, func, hits, count))
+
+ return results
+
+
+def main(**args):
+    """Report line coverage per function or file from lcov .info data.
+
+    Coverage comes either from parsing .info files (collect) or a previous
+    CSV (--use); results can be written to CSV (--output), merged with an
+    existing CSV (--merge), and diffed against a prior CSV (--diff).
+    `args` is the argparse namespace as a dict (see bottom of file).
+    """
+    def openio(path, mode='r'):
+        # '-' means stdin/stdout; dup the fd so closing the returned file
+        # doesn't close the process's real stream
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find coverage
+    if not args.get('use'):
+        # find *.info files
+        paths = []
+        for path in args['info_paths']:
+            if os.path.isdir(path):
+                # NOTE(review): globs '*.gcov' here, but INFO_PATHS and the
+                # error below both refer to .info files — confirm extension
+                path = path + '/*.gcov'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .info files found in %r?' % args['info_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['coverage_hits']),
+                    int(result['coverage_count']))
+                for result in r
+                if result.get('coverage_hits') not in {None, ''}
+                if result.get('coverage_count') not in {None, ''}]
+
+    total_hits, total_count = 0, 0
+    for _, _, hits, count in results:
+        total_hits += hits
+        total_count += count
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['coverage_hits']),
+                        int(result['coverage_count']))
+                    for result in r
+                    if result.get('coverage_hits') not in {None, ''}
+                    if result.get('coverage_count') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total_hits, prev_total_count = 0, 0
+        for _, _, hits, count in prev_results:
+            prev_total_hits += hits
+            prev_total_count += count
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('coverage_hits', None)
+                        result.pop('coverage_count', None)
+                        merged_results[(file, func)] = result
+                        # keys() view of the last row's remaining columns
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, hits, count in results:
+            merged_results[(file, func)]['coverage_hits'] = hits
+            merged_results[(file, func)]['coverage_count'] = count
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        # sum (hits, count) grouped by function name or by file
+        entries = co.defaultdict(lambda: (0, 0))
+        for file, func, hits, count in results:
+            entry = (file if by == 'file' else func)
+            entry_hits, entry_count = entries[entry]
+            entries[entry] = (entry_hits + hits, entry_count + count)
+        return entries
+
+    def diff_entries(olds, news):
+        # name -> (old_hits, old_count, new_hits, new_count,
+        #          diff_hits, diff_count, coverage-ratio delta)
+        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
+        for name, (new_hits, new_count) in news.items():
+            diff[name] = (
+                0, 0,
+                new_hits, new_count,
+                new_hits, new_count,
+                (new_hits/new_count if new_count else 1.0) - 1.0)
+        for name, (old_hits, old_count) in olds.items():
+            _, _, new_hits, new_count, _, _, _ = diff[name]
+            diff[name] = (
+                old_hits, old_count,
+                new_hits, new_count,
+                new_hits-old_hits, new_count-old_count,
+                ((new_hits/new_count if new_count else 1.0)
+                    - (old_hits/old_count if old_count else 1.0)))
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        else:
+            # default for diffs: largest ratio change first
+            return sorted(entries, key=lambda x: (-x[1][6], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %19s' % (by, 'hits/line'))
+        else:
+            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, hits, count):
+        print("%-36s %11s %7s" % (name,
+            '%d/%d' % (hits, count)
+            if count else '-',
+            '%.1f%%' % (100*hits/count)
+            if count else '-'))
+
+    def print_diff_entry(name,
+            old_hits, old_count,
+            new_hits, new_count,
+            diff_hits, diff_count,
+            ratio):
+        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
+            '%d/%d' % (old_hits, old_count)
+            if old_count else '-',
+            '%.1f%%' % (100*old_hits/old_count)
+            if old_count else '-',
+            '%d/%d' % (new_hits, new_count)
+            if new_count else '-',
+            '%.1f%%' % (100*new_hits/new_count)
+            if new_count else '-',
+            '%+d/%+d' % (diff_hits, diff_count),
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, (hits, count) in sorted_entries(entries.items()):
+                print_entry(name, hits, count)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
+                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
+            for name, (
+                    old_hits, old_count,
+                    new_hits, new_count,
+                    diff_hits, diff_count, ratio) in sorted_diff_entries(
+                        diff.items()):
+                if ratio or args.get('all'):
+                    print_diff_entry(name,
+                        old_hits, old_count,
+                        new_hits, new_count,
+                        diff_hits, diff_count,
+                        ratio)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total_hits, total_count)
+        else:
+            ratio = ((total_hits/total_count
+                    if total_count else 1.0)
+                - (prev_total_hits/prev_total_count
+                    if prev_total_count else 1.0))
+            print_diff_entry('TOTAL',
+                prev_total_hits, prev_total_count,
+                total_hits, total_count,
+                total_hits-prev_total_hits, total_count-prev_total_count,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Parse and report coverage info from .info files \
+ generated by lcov")
+ parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
+ help="Description of where to find *.info files. May be a directory \
+ or list of paths. *.info files will be merged to show the total \
+ coverage. Defaults to %r." % INFO_PATHS)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument('-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument('-u', '--use',
+ help="Don't do any work, instead use this CSV file.")
+ parser.add_argument('-d', '--diff',
+ help="Specify CSV file to diff code size against.")
+ parser.add_argument('-m', '--merge',
+ help="Merge with an existing CSV file when writing to output.")
+ parser.add_argument('-a', '--all', action='store_true',
+ help="Show all functions, not just the ones that changed.")
+ parser.add_argument('-A', '--everything', action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument('-s', '--coverage-sort', action='store_true',
+ help="Sort by coverage.")
+ parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
+ help="Sort by coverage, but backwards.")
+ parser.add_argument('-F', '--files', action='store_true',
+ help="Show file-level coverage.")
+ parser.add_argument('-Y', '--summary', action='store_true',
+ help="Only show the total coverage.")
+ parser.add_argument('-q', '--quiet', action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument('--build-dir',
+ help="Specify the relative build directory. Used to map object files \
+ to the correct source files.")
+ sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/data.py b/components/fs/littlefs/littlefs/scripts/data.py
new file mode 100755
index 00000000..4b8e00da
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/data.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+#
+# Script to find data size at the function level. Basically just a bit wrapper
+# around nm with some extra conveniences for comparing builds. Heavily inspired
+# by Linux's Bloat-O-Meter.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o']
+
+def collect(paths, **args):
+ results = co.defaultdict(lambda: 0)
+ pattern = re.compile(
+ '^(?P[0-9a-fA-F]+)' +
+ ' (?P[%s])' % re.escape(args['type']) +
+ ' (?P.+?)$')
+ for path in paths:
+ # note nm-tool may contain extra args
+ cmd = args['nm_tool'] + ['--size-sort', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace')
+ for line in proc.stdout:
+ m = pattern.match(line)
+ if m:
+ results[(path, m.group('func'))] += int(m.group('size'), 16)
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ flat_results = []
+ for (file, func), size in results.items():
+ # map to source files
+ if args.get('build_dir'):
+ file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+ # replace .o with .c, different scripts report .o/.c, we need to
+ # choose one if we want to deduplicate csv files
+ file = re.sub('\.o$', '.c', file)
+ # discard internal functions
+ if not args.get('everything'):
+ if func.startswith('__'):
+ continue
+ # discard .8449 suffixes created by optimizer
+ func = re.sub('\.[0-9]+', '', func)
+ flat_results.append((file, func, size))
+
+ return flat_results
+
+def main(**args):
+    """Report data size per function or file.
+
+    Sizes come either from nm over .o files (collect) or from a previous
+    CSV (--use); results can be written to CSV (--output), merged with an
+    existing CSV (--merge), and diffed against a prior CSV (--diff).
+    `args` is the argparse namespace as a dict (see bottom of file).
+    """
+    def openio(path, mode='r'):
+        # '-' means stdin/stdout; dup the fd so closing the returned file
+        # doesn't close the process's real stream
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            # NOTE(review): message says ".obj" while the glob is "*.o" — confirm
+            print('no .obj files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['data_size']))
+                for result in r
+                if result.get('data_size') not in {None, ''}]
+
+    total = 0
+    for _, _, size in results:
+        total += size
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['data_size']))
+                    for result in r
+                    if result.get('data_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total = 0
+        for _, _, size in prev_results:
+            prev_total += size
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('data_size', None)
+                        merged_results[(file, func)] = result
+                        # keys() view of the last row's remaining columns
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, size in results:
+            merged_results[(file, func)]['data_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        # sum sizes grouped by function name or by file
+        entries = co.defaultdict(lambda: 0)
+        for file, func, size in results:
+            entry = (file if by == 'file' else func)
+            entries[entry] += size
+        return entries
+
+    def diff_entries(olds, news):
+        # name -> (old size, new size, absolute diff, relative diff)
+        diff = co.defaultdict(lambda: (0, 0, 0, 0))
+        for name, new in news.items():
+            diff[name] = (0, new, new, 1.0)
+        for name, old in olds.items():
+            _, new, _, _ = diff[name]
+            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            # default for diffs: largest relative change first
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s' % (by, 'size'))
+        else:
+            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for old, _, _, _ in diff.values() if not old),
+                sum(1 for _, new, _, _ in diff.values() if not new)))
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
+                if ratio or args.get('all'):
+                    print_diff_entry(name, old, new, diff, ratio)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
+                total-prev_total,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Find data size at the function level.")
+ parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+ help="Description of where to find *.o files. May be a directory \
+ or a list of paths. Defaults to %r." % OBJ_PATHS)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument('-q', '--quiet', action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument('-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument('-u', '--use',
+ help="Don't compile and find data sizes, instead use this CSV file.")
+ parser.add_argument('-d', '--diff',
+ help="Specify CSV file to diff data size against.")
+ parser.add_argument('-m', '--merge',
+ help="Merge with an existing CSV file when writing to output.")
+ parser.add_argument('-a', '--all', action='store_true',
+ help="Show all functions, not just the ones that changed.")
+ parser.add_argument('-A', '--everything', action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument('-s', '--size-sort', action='store_true',
+ help="Sort by size.")
+ parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+ help="Sort by size, but backwards.")
+ parser.add_argument('-F', '--files', action='store_true',
+ help="Show file-level data sizes. Note this does not include padding! "
+ "So sizes may differ from other tools.")
+ parser.add_argument('-Y', '--summary', action='store_true',
+ help="Only show the total data size.")
+ parser.add_argument('--type', default='dDbB',
+ help="Type of symbols to report, this uses the same single-character "
+ "type-names emitted by nm. Defaults to %(default)r.")
+ parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
+ help="Path to the nm tool to use.")
+ parser.add_argument('--build-dir',
+ help="Specify the relative build directory. Used to map object files \
+ to the correct source files.")
+ sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/explode_asserts.py b/components/fs/littlefs/littlefs/scripts/explode_asserts.py
new file mode 100755
index 00000000..8a8e5b1c
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/explode_asserts.py
@@ -0,0 +1,383 @@
+#!/usr/bin/env python3
+
+import re
+import sys
+
+PATTERN = ['LFS_ASSERT', 'assert']
+PREFIX = 'LFS'
+MAXWIDTH = 16
+
+ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
+FAIL = """
+__attribute__((unused))
+static void __{prefix}_assert_fail_{type}(
+ const char *file, int line, const char *comp,
+ {ctype} lh, size_t lsize,
+ {ctype} rh, size_t rsize) {{
+ printf("%s:%d:assert: assert failed with ", file, line);
+ __{prefix}_assert_print_{type}(lh, lsize);
+ printf(", expected %s ", comp);
+ __{prefix}_assert_print_{type}(rh, rsize);
+ printf("\\n");
+ fflush(NULL);
+ raise(SIGABRT);
+}}
+"""
+
+COMP = {
+ '==': 'eq',
+ '!=': 'ne',
+ '<=': 'le',
+ '>=': 'ge',
+ '<': 'lt',
+ '>': 'gt',
+}
+
+TYPE = {
+ 'int': {
+ 'ctype': 'intmax_t',
+ 'fail': FAIL,
+ 'print': """
+ __attribute__((unused))
+ static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
+ (void)size;
+ printf("%"PRIiMAX, v);
+ }}
+ """,
+ 'assert': """
+ #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
+ do {{
+ __typeof__(lh) _lh = lh;
+ __typeof__(lh) _rh = (__typeof__(lh))rh;
+ if (!(_lh {op} _rh)) {{
+ __{prefix}_assert_fail_{type}(file, line, "{comp}",
+ (intmax_t)_lh, 0, (intmax_t)_rh, 0);
+ }}
+ }} while (0)
+ """
+ },
+ 'bool': {
+ 'ctype': 'bool',
+ 'fail': FAIL,
+ 'print': """
+ __attribute__((unused))
+ static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
+ (void)size;
+ printf("%s", v ? "true" : "false");
+ }}
+ """,
+ 'assert': """
+ #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
+ do {{
+ bool _lh = !!(lh);
+ bool _rh = !!(rh);
+ if (!(_lh {op} _rh)) {{
+ __{prefix}_assert_fail_{type}(file, line, "{comp}",
+ _lh, 0, _rh, 0);
+ }}
+ }} while (0)
+ """
+ },
+ 'mem': {
+ 'ctype': 'const void *',
+ 'fail': FAIL,
+ 'print': """
+ __attribute__((unused))
+ static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
+ const uint8_t *s = v;
+ printf("\\\"");
+ for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
+ if (s[i] >= ' ' && s[i] <= '~') {{
+ printf("%c", s[i]);
+ }} else {{
+ printf("\\\\x%02x", s[i]);
+ }}
+ }}
+ if (size > {maxwidth}) {{
+ printf("...");
+ }}
+ printf("\\\"");
+ }}
+ """,
+ 'assert': """
+ #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
+ do {{
+ const void *_lh = lh;
+ const void *_rh = rh;
+ if (!(memcmp(_lh, _rh, size) {op} 0)) {{
+ __{prefix}_assert_fail_{type}(file, line, "{comp}",
+ _lh, size, _rh, size);
+ }}
+ }} while (0)
+ """
+ },
+ 'str': {
+ 'ctype': 'const char *',
+ 'fail': FAIL,
+ 'print': """
+ __attribute__((unused))
+ static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
+ __{prefix}_assert_print_mem(v, size);
+ }}
+ """,
+ 'assert': """
+ #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
+ do {{
+ const char *_lh = lh;
+ const char *_rh = rh;
+ if (!(strcmp(_lh, _rh) {op} 0)) {{
+ __{prefix}_assert_fail_{type}(file, line, "{comp}",
+ _lh, strlen(_lh), _rh, strlen(_rh));
+ }}
+ }} while (0)
+ """
+ }
+}
+
+def mkdecls(outf, maxwidth=16):
+ outf.write("#include <stdio.h>\n")
+ outf.write("#include <stdbool.h>\n")
+ outf.write("#include <stdint.h>\n")
+ outf.write("#include <inttypes.h>\n")
+ outf.write("#include <signal.h>\n")
+
+ for type, desc in sorted(TYPE.items()):
+ format = {
+ 'type': type.lower(), 'TYPE': type.upper(),
+ 'ctype': desc['ctype'],
+ 'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
+ 'maxwidth': maxwidth,
+ }
+ outf.write(re.sub('\s+', ' ',
+ desc['print'].strip().format(**format))+'\n')
+ outf.write(re.sub('\s+', ' ',
+ desc['fail'].strip().format(**format))+'\n')
+
+ for op, comp in sorted(COMP.items()):
+ format.update({
+ 'comp': comp.lower(), 'COMP': comp.upper(),
+ 'op': op,
+ })
+ outf.write(re.sub('\s+', ' ',
+ desc['assert'].strip().format(**format))+'\n')
+
+def mkassert(type, comp, lh, rh, size=None):
+ format = {
+ 'type': type.lower(), 'TYPE': type.upper(),
+ 'comp': comp.lower(), 'COMP': comp.upper(),
+ 'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
+ 'lh': lh.strip(' '),
+ 'rh': rh.strip(' '),
+ 'size': size,
+ }
+ if size:
+ return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
+ .format(**format))
+ else:
+ return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
+ .format(**format))
+
+
+# simple recursive descent parser
+LEX = {
+ 'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
+ 'assert': PATTERN,
+ 'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])*'"],
+ 'arrow': ['=>'],
+ 'paren': ['\(', '\)'],
+ 'op': ['strcmp', 'memcmp', '->'],
+ 'comp': ['==', '!=', '<=', '>=', '<', '>'],
+ 'logic': ['\&\&', '\|\|'],
+ 'sep': [':', ';', '\{', '\}', ','],
+}
+
+class ParseFailure(Exception):
+ def __init__(self, expected, found):
+ self.expected = expected
+ self.found = found
+
+ def __str__(self):
+ return "expected %r, found %s..." % (
+ self.expected, repr(self.found)[:70])
+
+class Parse:
+ def __init__(self, inf, lexemes):
+ p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
+ for n, l in lexemes.items())
+ p = re.compile(p, re.DOTALL)
+ data = inf.read()
+ tokens = []
+ while True:
+ m = p.search(data)
+ if m:
+ if m.start() > 0:
+ tokens.append((None, data[:m.start()]))
+ tokens.append((m.lastgroup, m.group()))
+ data = data[m.end():]
+ else:
+ tokens.append((None, data))
+ break
+ self.tokens = tokens
+ self.off = 0
+
+ def lookahead(self, *pattern):
+ if self.off < len(self.tokens):
+ token = self.tokens[self.off]
+ if token[0] in pattern or token[1] in pattern:
+ self.m = token[1]
+ return self.m
+ self.m = None
+ return self.m
+
+ def accept(self, *patterns):
+ m = self.lookahead(*patterns)
+ if m is not None:
+ self.off += 1
+ return m
+
+ def expect(self, *patterns):
+ m = self.accept(*patterns)
+ if not m:
+ raise ParseFailure(patterns, self.tokens[self.off:])
+ return m
+
+ def push(self):
+ return self.off
+
+ def pop(self, state):
+ self.off = state
+
+def passert(p):
+ def pastr(p):
+ p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ lh = pexpr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = pexpr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ comp = p.expect('comp') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ p.expect(')')
+ return mkassert('str', COMP[comp], lh, rh)
+
+ def pamem(p):
+ p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ lh = pexpr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ rh = pexpr(p) ; p.accept('ws')
+ p.expect(',') ; p.accept('ws')
+ size = pexpr(p) ; p.accept('ws')
+ p.expect(')') ; p.accept('ws')
+ comp = p.expect('comp') ; p.accept('ws')
+ p.expect('0') ; p.accept('ws')
+ p.expect(')')
+ return mkassert('mem', COMP[comp], lh, rh, size)
+
+ def paint(p):
+ p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ lh = pexpr(p) ; p.accept('ws')
+ comp = p.expect('comp') ; p.accept('ws')
+ rh = pexpr(p) ; p.accept('ws')
+ p.expect(')')
+ return mkassert('int', COMP[comp], lh, rh)
+
+ def pabool(p):
+ p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
+ lh = pexprs(p) ; p.accept('ws')
+ p.expect(')')
+ return mkassert('bool', 'eq', lh, 'true')
+
+ def pa(p):
+ return p.expect('assert')
+
+ state = p.push()
+ lastf = None
+ for pa in [pastr, pamem, paint, pabool, pa]:
+ try:
+ return pa(p)
+ except ParseFailure as f:
+ p.pop(state)
+ lastf = f
+ else:
+ raise lastf
+
+def pexpr(p):
+ res = []
+ while True:
+ if p.accept('('):
+ res.append(p.m)
+ while True:
+ res.append(pexprs(p))
+ if p.accept('sep'):
+ res.append(p.m)
+ else:
+ break
+ res.append(p.expect(')'))
+ elif p.lookahead('assert'):
+ res.append(passert(p))
+ elif p.accept('assert', 'ws', 'string', 'op', None):
+ res.append(p.m)
+ else:
+ return ''.join(res)
+
+def pexprs(p):
+ res = []
+ while True:
+ res.append(pexpr(p))
+ if p.accept('comp', 'logic', ','):
+ res.append(p.m)
+ else:
+ return ''.join(res)
+
+def pstmt(p):
+ ws = p.accept('ws') or ''
+ lh = pexprs(p)
+ if p.accept('=>'):
+ rh = pexprs(p)
+ return ws + mkassert('int', 'eq', lh, rh)
+ else:
+ return ws + lh
+
+
+def main(args):
+ inf = open(args.input, 'r') if args.input else sys.stdin
+ outf = open(args.output, 'w') if args.output else sys.stdout
+
+ lexemes = LEX.copy()
+ if args.pattern:
+ lexemes['assert'] = args.pattern
+ p = Parse(inf, lexemes)
+
+ # write extra verbose asserts
+ mkdecls(outf, maxwidth=args.maxwidth)
+ if args.input:
+ outf.write("#line %d \"%s\"\n" % (1, args.input))
+
+ # parse and write out stmt at a time
+ try:
+ while True:
+ outf.write(pstmt(p))
+ if p.accept('sep'):
+ outf.write(p.m)
+ else:
+ break
+ except ParseFailure as f:
+ pass
+
+ for i in range(p.off, len(p.tokens)):
+ outf.write(p.tokens[i][1])
+
+if __name__ == "__main__":
+ import argparse
+ parser = argparse.ArgumentParser(
+ description="Cpp step that increases assert verbosity")
+ parser.add_argument('input', nargs='?',
+ help="Input C file after cpp.")
+ parser.add_argument('-o', '--output', required=True,
+ help="Output C file.")
+ parser.add_argument('-p', '--pattern', action='append',
+ help="Patterns to search for starting an assert statement.")
+ parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
+ help="Maximum number of characters to display for strcmp and memcmp.")
+ main(parser.parse_args())
diff --git a/components/fs/littlefs/littlefs/scripts/prefix.py b/components/fs/littlefs/littlefs/scripts/prefix.py
new file mode 100755
index 00000000..4c33ad48
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/prefix.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python2
+
+# This script replaces prefixes of files, and symbols in that file.
+# Useful for creating different versions of the codebase that don't
+# conflict at compile time.
+#
+# example:
+# $ ./scripts/prefix.py lfs2
+
+import os
+import os.path
+import re
+import glob
+import itertools
+import tempfile
+import shutil
+import subprocess
+
+DEFAULT_PREFIX = "lfs"
+
+def subn(from_prefix, to_prefix, name):
+ name, count1 = re.subn('\\b'+from_prefix, to_prefix, name)
+ name, count2 = re.subn('\\b'+from_prefix.upper(), to_prefix.upper(), name)
+ name, count3 = re.subn('\\B-D'+from_prefix.upper(),
+ '-D'+to_prefix.upper(), name)
+ return name, count1+count2+count3
+
+def main(from_prefix, to_prefix=None, files=None):
+ if not to_prefix:
+ from_prefix, to_prefix = DEFAULT_PREFIX, from_prefix
+
+ if not files:
+ files = subprocess.check_output([
+ 'git', 'ls-tree', '-r', '--name-only', 'HEAD']).split()
+
+ for oldname in files:
+ # Rename any matching file names
+ newname, namecount = subn(from_prefix, to_prefix, oldname)
+ if namecount:
+ subprocess.check_call(['git', 'mv', oldname, newname])
+
+ # Rename any prefixes in file
+ count = 0
+ with open(newname+'~', 'w') as tempf:
+ with open(newname) as newf:
+ for line in newf:
+ line, n = subn(from_prefix, to_prefix, line)
+ count += n
+ tempf.write(line)
+ shutil.copystat(newname, newname+'~')
+ os.rename(newname+'~', newname)
+ subprocess.check_call(['git', 'add', newname])
+
+ # Summary
+ print '%s: %d replacements' % (
+ '%s -> %s' % (oldname, newname) if namecount else oldname,
+ count)
+
+if __name__ == "__main__":
+ import sys
+ sys.exit(main(*sys.argv[1:]))
diff --git a/components/fs/littlefs/littlefs/scripts/readblock.py b/components/fs/littlefs/littlefs/scripts/readblock.py
new file mode 100755
index 00000000..817517bc
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/readblock.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+import subprocess as sp
+
+def main(args):
+ with open(args.disk, 'rb') as f:
+ f.seek(args.block * args.block_size)
+ block = (f.read(args.block_size)
+ .ljust(args.block_size, b'\xff'))
+
+ # what did you expect?
+ print("%-8s %-s" % ('off', 'data'))
+ return sp.run(['xxd', '-g1', '-'], input=block).returncode
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Hex dump a specific block in a disk.")
+ parser.add_argument('disk',
+ help="File representing the block device.")
+ parser.add_argument('block_size', type=lambda x: int(x, 0),
+ help="Size of a block in bytes.")
+ parser.add_argument('block', type=lambda x: int(x, 0),
+ help="Address of block to dump.")
+ sys.exit(main(parser.parse_args()))
diff --git a/components/fs/littlefs/littlefs/scripts/readmdir.py b/components/fs/littlefs/littlefs/scripts/readmdir.py
new file mode 100755
index 00000000..b6c3dcca
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/readmdir.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python3
+
+import struct
+import binascii
+import sys
+import itertools as it
+
+TAG_TYPES = {
+ 'splice': (0x700, 0x400),
+ 'create': (0x7ff, 0x401),
+ 'delete': (0x7ff, 0x4ff),
+ 'name': (0x700, 0x000),
+ 'reg': (0x7ff, 0x001),
+ 'dir': (0x7ff, 0x002),
+ 'superblock': (0x7ff, 0x0ff),
+ 'struct': (0x700, 0x200),
+ 'dirstruct': (0x7ff, 0x200),
+ 'ctzstruct': (0x7ff, 0x202),
+ 'inlinestruct': (0x7ff, 0x201),
+ 'userattr': (0x700, 0x300),
+ 'tail': (0x700, 0x600),
+ 'softtail': (0x7ff, 0x600),
+ 'hardtail': (0x7ff, 0x601),
+ 'gstate': (0x700, 0x700),
+ 'movestate': (0x7ff, 0x7ff),
+ 'crc': (0x700, 0x500),
+}
+
+class Tag:
+ def __init__(self, *args):
+ if len(args) == 1:
+ self.tag = args[0]
+ elif len(args) == 3:
+ if isinstance(args[0], str):
+ type = TAG_TYPES[args[0]][1]
+ else:
+ type = args[0]
+
+ if isinstance(args[1], str):
+ id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
+ else:
+ id = args[1]
+
+ if isinstance(args[2], str):
+ size = int(args[2], 0) if args[2] not in 'x.' else 0x3ff
+ else:
+ size = args[2]
+
+ self.tag = (type << 20) | (id << 10) | size
+ else:
+ assert False
+
+ @property
+ def isvalid(self):
+ return not bool(self.tag & 0x80000000)
+
+ @property
+ def isattr(self):
+ return not bool(self.tag & 0x40000000)
+
+ @property
+ def iscompactable(self):
+ return bool(self.tag & 0x20000000)
+
+ @property
+ def isunique(self):
+ return not bool(self.tag & 0x10000000)
+
+ @property
+ def type(self):
+ return (self.tag & 0x7ff00000) >> 20
+
+ @property
+ def type1(self):
+ return (self.tag & 0x70000000) >> 20
+
+ @property
+ def type3(self):
+ return (self.tag & 0x7ff00000) >> 20
+
+ @property
+ def id(self):
+ return (self.tag & 0x000ffc00) >> 10
+
+ @property
+ def size(self):
+ return (self.tag & 0x000003ff) >> 0
+
+ @property
+ def dsize(self):
+ return 4 + (self.size if self.size != 0x3ff else 0)
+
+ @property
+ def chunk(self):
+ return self.type & 0xff
+
+ @property
+ def schunk(self):
+ return struct.unpack('b', struct.pack('B', self.chunk))[0]
+
+ def is_(self, type):
+ return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
+
+ def mkmask(self):
+ return Tag(
+ 0x700 if self.isunique else 0x7ff,
+ 0x3ff if self.isattr else 0,
+ 0)
+
+ def chid(self, nid):
+ ntag = Tag(self.type, nid, self.size)
+ if hasattr(self, 'off'): ntag.off = self.off
+ if hasattr(self, 'data'): ntag.data = self.data
+ if hasattr(self, 'crc'): ntag.crc = self.crc
+ return ntag
+
+ def typerepr(self):
+ if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
+ return 'crc (bad)'
+
+ reverse_types = {v: k for k, v in TAG_TYPES.items()}
+ for prefix in range(12):
+ mask = 0x7ff & ~((1 << prefix)-1)
+ if (mask, self.type & mask) in reverse_types:
+ type = reverse_types[mask, self.type & mask]
+ if prefix > 0:
+ return '%s %#0*x' % (
+ type, prefix//4, self.type & ((1 << prefix)-1))
+ else:
+ return type
+ else:
+ return '%02x' % self.type
+
+ def idrepr(self):
+ return repr(self.id) if self.id != 0x3ff else '.'
+
+ def sizerepr(self):
+ return repr(self.size) if self.size != 0x3ff else 'x'
+
+ def __repr__(self):
+ return 'Tag(%r, %d, %d)' % (self.typerepr(), self.id, self.size)
+
+ def __lt__(self, other):
+ return (self.id, self.type) < (other.id, other.type)
+
+ def __bool__(self):
+ return self.isvalid
+
+ def __int__(self):
+ return self.tag
+
+ def __index__(self):
+ return self.tag
+
+class MetadataPair:
+ def __init__(self, blocks):
+ if len(blocks) > 1:
+ self.pair = [MetadataPair([block]) for block in blocks]
+ self.pair = sorted(self.pair, reverse=True)
+
+ self.data = self.pair[0].data
+ self.rev = self.pair[0].rev
+ self.tags = self.pair[0].tags
+ self.ids = self.pair[0].ids
+ self.log = self.pair[0].log
+ self.all_ = self.pair[0].all_
+ return
+
+ self.pair = [self]
+ self.data = blocks[0]
+ block = self.data
+
+ self.rev, = struct.unpack('<I', self.data[0:4])
+ crc = binascii.crc32(self.data[0:4])
+
+ # parse tags
+ corrupt = False
+ tag = Tag(0xffffffff)
+ off = 4
+ self.log = []
+ self.all_ = []
+ while len(block) - off >= 4:
+ ntag, = struct.unpack('>I', block[off:off+4])
+
+ tag = Tag(int(tag) ^ ntag)
+ tag.off = off + 4
+ tag.data = block[off+4:off+tag.dsize]
+ if tag.is_('crc'):
+ crc = binascii.crc32(block[off:off+4+4], crc)
+ else:
+ crc = binascii.crc32(block[off:off+tag.dsize], crc)
+ tag.crc = crc
+ off += tag.dsize
+
+ self.all_.append(tag)
+
+ if tag.is_('crc'):
+ # is valid commit?
+ if crc != 0xffffffff:
+ corrupt = True
+ if not corrupt:
+ self.log = self.all_.copy()
+
+ # reset tag parsing
+ crc = 0
+ tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
+
+ # find active ids
+ self.ids = list(it.takewhile(
+ lambda id: Tag('name', id, 0) in self,
+ it.count()))
+
+ # find most recent tags
+ self.tags = []
+ for tag in self.log:
+ if tag.is_('crc') or tag.is_('splice'):
+ continue
+ elif tag.id == 0x3ff:
+ if tag in self and self[tag] is tag:
+ self.tags.append(tag)
+ else:
+ # id could have change, I know this is messy and slow
+ # but it works
+ for id in self.ids:
+ ntag = tag.chid(id)
+ if ntag in self and self[ntag] is tag:
+ self.tags.append(ntag)
+
+ self.tags = sorted(self.tags)
+
+ def __bool__(self):
+ return bool(self.log)
+
+ def __lt__(self, other):
+ # corrupt blocks don't count
+ if not self or not other:
+ return bool(other)
+
+ # use sequence arithmetic to avoid overflow
+ return not ((other.rev - self.rev) & 0x80000000)
+
+ def __contains__(self, args):
+ try:
+ self[args]
+ return True
+ except KeyError:
+ return False
+
+ def __getitem__(self, args):
+ if isinstance(args, tuple):
+ gmask, gtag = args
+ else:
+ gmask, gtag = args.mkmask(), args
+
+ gdiff = 0
+ for tag in reversed(self.log):
+ if (gmask.id != 0 and tag.is_('splice') and
+ tag.id <= gtag.id - gdiff):
+ if tag.is_('create') and tag.id == gtag.id - gdiff:
+ # creation point
+ break
+
+ gdiff += tag.schunk
+
+ if ((int(gmask) & int(tag)) ==
+ (int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
+ if tag.size == 0x3ff:
+ # deleted
+ break
+
+ return tag
+
+ raise KeyError(gmask, gtag)
+
+ def _dump_tags(self, tags, f=sys.stdout, truncate=True):
+ f.write("%-8s %-8s %-13s %4s %4s" % (
+ 'off', 'tag', 'type', 'id', 'len'))
+ if truncate:
+ f.write(' data (truncated)')
+ f.write('\n')
+
+ for tag in tags:
+ f.write("%08x: %08x %-13s %4s %4s" % (
+ tag.off, tag,
+ tag.typerepr(), tag.idrepr(), tag.sizerepr()))
+ if truncate:
+ f.write(" %-23s %-8s\n" % (
+ ' '.join('%02x' % c for c in tag.data[:8]),
+ ''.join(c if c >= ' ' and c <= '~' else '.'
+ for c in map(chr, tag.data[:8]))))
+ else:
+ f.write("\n")
+ for i in range(0, len(tag.data), 16):
+ f.write(" %08x: %-47s %-16s\n" % (
+ tag.off+i,
+ ' '.join('%02x' % c for c in tag.data[i:i+16]),
+ ''.join(c if c >= ' ' and c <= '~' else '.'
+ for c in map(chr, tag.data[i:i+16]))))
+
+ def dump_tags(self, f=sys.stdout, truncate=True):
+ self._dump_tags(self.tags, f=f, truncate=truncate)
+
+ def dump_log(self, f=sys.stdout, truncate=True):
+ self._dump_tags(self.log, f=f, truncate=truncate)
+
+ def dump_all(self, f=sys.stdout, truncate=True):
+ self._dump_tags(self.all_, f=f, truncate=truncate)
+
+def main(args):
+ blocks = []
+ with open(args.disk, 'rb') as f:
+ for block in [args.block1, args.block2]:
+ if block is None:
+ continue
+ f.seek(block * args.block_size)
+ blocks.append(f.read(args.block_size)
+ .ljust(args.block_size, b'\xff'))
+
+ # find most recent pair
+ mdir = MetadataPair(blocks)
+
+ try:
+ mdir.tail = mdir[Tag('tail', 0, 0)]
+ if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
+ mdir.tail = None
+ except KeyError:
+ mdir.tail = None
+
+ print("mdir {%s} rev %d%s%s%s" % (
+ ', '.join('%#x' % b
+ for b in [args.block1, args.block2]
+ if b is not None),
+ mdir.rev,
+ ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
+ if len(mdir.pair) > 1 else '',
+ ' (corrupted!)' if not mdir else '',
+ ' -> {%#x, %#x}' % struct.unpack('=%d" % max(tag.size, 1))
+ if tag.type:
+ print(" move dir {%#x, %#x} id %d" % (
+ blocks[0], blocks[1], tag.id))
+
+ # print mdir info
+ for i, dir in enumerate(dirs):
+ print("dir %s" % (json.dumps(dir[0].path)
+ if hasattr(dir[0], 'path') else '(orphan)'))
+
+ for j, mdir in enumerate(dir):
+ print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
+ mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
+ ' (corrupted!)' if not mdir else '',
+ ' -> {%#x, %#x}' % struct.unpack(' 0 and m.isinf(diff_limit)
+ else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
+ else '%+d' % diff_limit),
+ '' if not ratio
+ else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
+ else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
+ else ' (%+.1f%%)' % (100*ratio)))
+
+ def print_entries(by='name'):
+ # build optional tree of dependencies
+ def print_deps(entries, depth, print,
+ filter=lambda _: True,
+ prefixes=('', '', '', '')):
+ entries = entries if isinstance(entries, list) else list(entries)
+ filtered_entries = [(name, entry)
+ for name, entry in entries
+ if filter(name)]
+ for i, (name, entry) in enumerate(filtered_entries):
+ last = (i == len(filtered_entries)-1)
+ print(prefixes[0+last] + name, entry)
+
+ if depth > 0:
+ deps = entry[-1]
+ print_deps(entries, depth-1, print,
+ lambda name: name in deps,
+ ( prefixes[2+last] + "|-> ",
+ prefixes[2+last] + "'-> ",
+ prefixes[2+last] + "| ",
+ prefixes[2+last] + " "))
+
+ entries = dedup_entries(results, by=by)
+
+ if not args.get('diff'):
+ print_header(by=by)
+ print_deps(
+ sorted_entries(entries.items()),
+ args.get('depth') or 0,
+ lambda name, entry: print_entry(name, *entry[:-1]))
+ else:
+ prev_entries = dedup_entries(prev_results, by=by)
+ diff = diff_entries(prev_entries, entries)
+
+ print_header(by='%s (%d added, %d removed)' % (by,
+ sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
+ sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
+ print_deps(
+ filter(
+ lambda x: x[1][6] or args.get('all'),
+ sorted_diff_entries(diff.items())),
+ args.get('depth') or 0,
+ lambda name, entry: print_diff_entry(name, *entry[:-1]))
+
+ def print_totals():
+ if not args.get('diff'):
+ print_entry('TOTAL', total_frame, total_limit)
+ else:
+ diff_frame = total_frame - prev_total_frame
+ diff_limit = (
+ 0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
+ else (total_limit or 0) - (prev_total_limit or 0))
+ ratio = (
+ 0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
+ else +float('inf') if m.isinf(total_limit or 0)
+ else -float('inf') if m.isinf(prev_total_limit or 0)
+ else 0.0 if not prev_total_limit and not total_limit
+ else 1.0 if not prev_total_limit
+ else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
+ print_diff_entry('TOTAL',
+ prev_total_frame, prev_total_limit,
+ total_frame, total_limit,
+ diff_frame, diff_limit,
+ ratio)
+
+ if args.get('quiet'):
+ pass
+ elif args.get('summary'):
+ print_header()
+ print_totals()
+ elif args.get('files'):
+ print_entries(by='file')
+ print_totals()
+ else:
+ print_entries(by='name')
+ print_totals()
+
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Find stack usage at the function level.")
+ parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
+ help="Description of where to find *.ci files. May be a directory \
+ or a list of paths. Defaults to %r." % CI_PATHS)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument('-q', '--quiet', action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument('-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument('-u', '--use',
+ help="Don't parse callgraph files, instead use this CSV file.")
+ parser.add_argument('-d', '--diff',
+ help="Specify CSV file to diff against.")
+ parser.add_argument('-m', '--merge',
+ help="Merge with an existing CSV file when writing to output.")
+ parser.add_argument('-a', '--all', action='store_true',
+ help="Show all functions, not just the ones that changed.")
+ parser.add_argument('-A', '--everything', action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument('-s', '--limit-sort', action='store_true',
+ help="Sort by stack limit.")
+ parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
+ help="Sort by stack limit, but backwards.")
+ parser.add_argument('--frame-sort', action='store_true',
+ help="Sort by stack frame size.")
+ parser.add_argument('--reverse-frame-sort', action='store_true',
+ help="Sort by stack frame size, but backwards.")
+ parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
+ nargs='?', const=float('inf'),
+ help="Depth of dependencies to show.")
+ parser.add_argument('-F', '--files', action='store_true',
+ help="Show file-level calls.")
+ parser.add_argument('-Y', '--summary', action='store_true',
+ help="Only show the total stack size.")
+ parser.add_argument('--build-dir',
+ help="Specify the relative build directory. Used to map object files \
+ to the correct source files.")
+ sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/structs.py b/components/fs/littlefs/littlefs/scripts/structs.py
new file mode 100755
index 00000000..e8d7193e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/structs.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python3
+#
+# Script to find struct sizes.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o']
+
+def collect(paths, **args):
+ decl_pattern = re.compile(
+ '^\s+(?P[0-9]+)'
+ '\s+(?P[0-9]+)'
+ '\s+.*'
+ '\s+(?P[^\s]+)$')
+ struct_pattern = re.compile(
+ '^(?:.*DW_TAG_(?P[a-z_]+).*'
+ '|^.*DW_AT_name.*:\s*(?P[^:\s]+)\s*'
+ '|^.*DW_AT_decl_file.*:\s*(?P[0-9]+)\s*'
+ '|^.*DW_AT_byte_size.*:\s*(?P[0-9]+)\s*)$')
+
+ results = co.defaultdict(lambda: 0)
+ for path in paths:
+ # find decl, we want to filter by structs in .h files
+ decls = {}
+ # note objdump-tool may contain extra args
+ cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace')
+ for line in proc.stdout:
+ # find file numbers
+ m = decl_pattern.match(line)
+ if m:
+ decls[int(m.group('no'))] = m.group('file')
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ # collect structs as we parse dwarf info
+ found = False
+ name = None
+ decl = None
+ size = None
+
+ # note objdump-tool may contain extra args
+ cmd = args['objdump_tool'] + ['--dwarf=info', path]
+ if args.get('verbose'):
+ print(' '.join(shlex.quote(c) for c in cmd))
+ proc = sp.Popen(cmd,
+ stdout=sp.PIPE,
+ stderr=sp.PIPE if not args.get('verbose') else None,
+ universal_newlines=True,
+ errors='replace')
+ for line in proc.stdout:
+ # state machine here to find structs
+ m = struct_pattern.match(line)
+ if m:
+ if m.group('tag'):
+ if (name is not None
+ and decl is not None
+ and size is not None):
+ decl = decls.get(decl, '?')
+ results[(decl, name)] = size
+ found = (m.group('tag') == 'structure_type')
+ name = None
+ decl = None
+ size = None
+ elif found and m.group('name'):
+ name = m.group('name')
+ elif found and name and m.group('decl'):
+ decl = int(m.group('decl'))
+ elif found and name and m.group('size'):
+ size = int(m.group('size'))
+ proc.wait()
+ if proc.returncode != 0:
+ if not args.get('verbose'):
+ for line in proc.stderr:
+ sys.stdout.write(line)
+ sys.exit(-1)
+
+ flat_results = []
+ for (file, struct), size in results.items():
+ # map to source files
+ if args.get('build_dir'):
+ file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+ # only include structs declared in header files in the current
+ # directory, ignore internal-only # structs (these are represented
+ # in other measurements)
+ if not args.get('everything'):
+ if not file.endswith('.h'):
+ continue
+ # replace .o with .c, different scripts report .o/.c, we need to
+ # choose one if we want to deduplicate csv files
+ file = re.sub('\.o$', '.c', file)
+
+ flat_results.append((file, struct, size))
+
+ return flat_results
+
+
+def main(**args):
+ def openio(path, mode='r'):
+ if path == '-':
+ if 'r' in mode:
+ return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+ else:
+ return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+ else:
+ return open(path, mode)
+
+ # find sizes
+ if not args.get('use', None):
+ # find .o files
+ paths = []
+ for path in args['obj_paths']:
+ if os.path.isdir(path):
+ path = path + '/*.o'
+
+ for path in glob.glob(path):
+ paths.append(path)
+
+ if not paths:
+ print('no .obj files found in %r?' % args['obj_paths'])
+ sys.exit(-1)
+
+ results = collect(paths, **args)
+ else:
+ with openio(args['use']) as f:
+ r = csv.DictReader(f)
+ results = [
+ ( result['file'],
+ result['name'],
+ int(result['struct_size']))
+ for result in r
+ if result.get('struct_size') not in {None, ''}]
+
+ total = 0
+ for _, _, size in results:
+ total += size
+
+ # find previous results?
+ if args.get('diff'):
+ try:
+ with openio(args['diff']) as f:
+ r = csv.DictReader(f)
+ prev_results = [
+ ( result['file'],
+ result['name'],
+ int(result['struct_size']))
+ for result in r
+ if result.get('struct_size') not in {None, ''}]
+ except FileNotFoundError:
+ prev_results = []
+
+ prev_total = 0
+ for _, _, size in prev_results:
+ prev_total += size
+
+ # write results to CSV
+ if args.get('output'):
+ merged_results = co.defaultdict(lambda: {})
+ other_fields = []
+
+ # merge?
+ if args.get('merge'):
+ try:
+ with openio(args['merge']) as f:
+ r = csv.DictReader(f)
+ for result in r:
+ file = result.pop('file', '')
+ struct = result.pop('name', '')
+ result.pop('struct_size', None)
+ merged_results[(file, struct)] = result
+ other_fields = result.keys()
+ except FileNotFoundError:
+ pass
+
+ for file, struct, size in results:
+ merged_results[(file, struct)]['struct_size'] = size
+
+ with openio(args['output'], 'w') as f:
+ w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
+ w.writeheader()
+ for (file, struct), result in sorted(merged_results.items()):
+ w.writerow({'file': file, 'name': struct, **result})
+
+ # print results
+ def dedup_entries(results, by='name'):
+ entries = co.defaultdict(lambda: 0)
+ for file, struct, size in results:
+ entry = (file if by == 'file' else struct)
+ entries[entry] += size
+ return entries
+
+ def diff_entries(olds, news):
+ diff = co.defaultdict(lambda: (0, 0, 0, 0))
+ for name, new in news.items():
+ diff[name] = (0, new, new, 1.0)
+ for name, old in olds.items():
+ _, new, _, _ = diff[name]
+ diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+ return diff
+
+ def sorted_entries(entries):
+ if args.get('size_sort'):
+ return sorted(entries, key=lambda x: (-x[1], x))
+ elif args.get('reverse_size_sort'):
+ return sorted(entries, key=lambda x: (+x[1], x))
+ else:
+ return sorted(entries)
+
+ def sorted_diff_entries(entries):
+ if args.get('size_sort'):
+ return sorted(entries, key=lambda x: (-x[1][1], x))
+ elif args.get('reverse_size_sort'):
+ return sorted(entries, key=lambda x: (+x[1][1], x))
+ else:
+ return sorted(entries, key=lambda x: (-x[1][3], x))
+
+ def print_header(by=''):
+ if not args.get('diff'):
+ print('%-36s %7s' % (by, 'size'))
+ else:
+ print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+ def print_entry(name, size):
+ print("%-36s %7d" % (name, size))
+
+ def print_diff_entry(name, old, new, diff, ratio):
+ print("%-36s %7s %7s %+7d%s" % (name,
+ old or "-",
+ new or "-",
+ diff,
+ ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+ def print_entries(by='name'):
+ entries = dedup_entries(results, by=by)
+
+ if not args.get('diff'):
+ print_header(by=by)
+ for name, size in sorted_entries(entries.items()):
+ print_entry(name, size)
+ else:
+ prev_entries = dedup_entries(prev_results, by=by)
+ diff = diff_entries(prev_entries, entries)
+ print_header(by='%s (%d added, %d removed)' % (by,
+ sum(1 for old, _, _, _ in diff.values() if not old),
+ sum(1 for _, new, _, _ in diff.values() if not new)))
+ for name, (old, new, diff, ratio) in sorted_diff_entries(
+ diff.items()):
+ if ratio or args.get('all'):
+ print_diff_entry(name, old, new, diff, ratio)
+
+ def print_totals():
+ if not args.get('diff'):
+ print_entry('TOTAL', total)
+ else:
+ ratio = (0.0 if not prev_total and not total
+ else 1.0 if not prev_total
+ else (total-prev_total)/prev_total)
+ print_diff_entry('TOTAL',
+ prev_total, total,
+ total-prev_total,
+ ratio)
+
+ if args.get('quiet'):
+ pass
+ elif args.get('summary'):
+ print_header()
+ print_totals()
+ elif args.get('files'):
+ print_entries(by='file')
+ print_totals()
+ else:
+ print_entries(by='name')
+ print_totals()
+
+if __name__ == "__main__":
+ import argparse
+ import sys
+ parser = argparse.ArgumentParser(
+ description="Find struct sizes.")
+ parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+ help="Description of where to find *.o files. May be a directory \
+ or a list of paths. Defaults to %r." % OBJ_PATHS)
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help="Output commands that run behind the scenes.")
+ parser.add_argument('-q', '--quiet', action='store_true',
+ help="Don't show anything, useful with -o.")
+ parser.add_argument('-o', '--output',
+ help="Specify CSV file to store results.")
+ parser.add_argument('-u', '--use',
+ help="Don't compile and find struct sizes, instead use this CSV file.")
+ parser.add_argument('-d', '--diff',
+ help="Specify CSV file to diff struct size against.")
+ parser.add_argument('-m', '--merge',
+ help="Merge with an existing CSV file when writing to output.")
+ parser.add_argument('-a', '--all', action='store_true',
+ help="Show all functions, not just the ones that changed.")
+ parser.add_argument('-A', '--everything', action='store_true',
+ help="Include builtin and libc specific symbols.")
+ parser.add_argument('-s', '--size-sort', action='store_true',
+ help="Sort by size.")
+ parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+ help="Sort by size, but backwards.")
+ parser.add_argument('-F', '--files', action='store_true',
+ help="Show file-level struct sizes.")
+ parser.add_argument('-Y', '--summary', action='store_true',
+ help="Only show the total struct size.")
+ parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
+ help="Path to the objdump tool to use.")
+ parser.add_argument('--build-dir',
+ help="Specify the relative build directory. Used to map object files \
+ to the correct source files.")
+ sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/summary.py b/components/fs/littlefs/littlefs/scripts/summary.py
new file mode 100755
index 00000000..7ce769bf
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/summary.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+#
+# Script to summarize the outputs of other scripts. Operates on CSV files.
+#
+
+import functools as ft
+import collections as co
+import os
+import csv
+import re
+import math as m
+
+# displayable fields
+# Each Field bundles how one measurement column is handled:
+#   name  - column name shown in the table header
+#   parse - extract this field's value from a CSV row dict
+#   acc   - fold two values together when duplicate entries merge
+#   key   - map an accumulated value to a sortable number
+#   fmt   - printf-style column width
+#   repr  - render an accumulated value for display
+#   null  - placeholder string when a value is missing
+#   ratio - relative change between an old and new value for diffs
+Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
+FIELDS = [
+    # name, parse, accumulate, key, fmt, repr, null, ratio
+    Field('code',
+        lambda r: int(r['code_size']),
+        sum,
+        lambda r: r,
+        '%7s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('data',
+        lambda r: int(r['data_size']),
+        sum,
+        lambda r: r,
+        '%7s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    # stack limits accumulate with max and may be infinite (e.g. recursion)
+    Field('stack',
+        lambda r: float(r['stack_limit']),
+        max,
+        lambda r: r,
+        '%7s',
+        lambda r: '∞' if m.isinf(r) else int(r),
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('structs',
+        lambda r: int(r['struct_size']),
+        sum,
+        lambda r: r,
+        '%8s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    # coverage is tracked as a (hits, count) pair, summed pairwise
+    Field('coverage',
+        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
+        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
+        lambda r: r[0]/r[1],
+        '%19s',
+        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
+        '%11s %7s' % ('-', '-'),
+        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
+]
+
+
+def main(**args):
+    """Merge one or more CSV measurement files and print a summary table.
+
+    args comes from argparse via vars(); see the __main__ block below.
+    NOTE(review): this function references the global `sys` (in openio and
+    nowhere else at module scope), but `sys` is only imported inside the
+    __main__ block -- importing this file as a module and calling main()
+    with a '-' path would NameError; confirm intended.
+    """
+    def openio(path, mode='r'):
+        # '-' means stdin/stdout; dup the fd so the caller may close freely
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find results
+    # results maps (file, name) -> {field_name: accumulated value}
+    results = co.defaultdict(lambda: {})
+    for path in args.get('csv_paths', '-'):
+        try:
+            with openio(path) as f:
+                r = csv.DictReader(f)
+                for result in r:
+                    file = result.pop('file', '')
+                    name = result.pop('name', '')
+                    prev = results[(file, name)]
+                    for field in FIELDS:
+                        try:
+                            # note: r is rebound here; the for-loop above
+                            # already holds the DictReader iterator
+                            r = field.parse(result)
+                            if field.name in prev:
+                                results[(file, name)][field.name] = field.acc(
+                                    [prev[field.name], r])
+                            else:
+                                results[(file, name)][field.name] = r
+                        except (KeyError, ValueError):
+                            # row doesn't carry this field; skip it
+                            pass
+        except FileNotFoundError:
+            pass
+
+    # find fields
+    if args.get('all_fields'):
+        fields = FIELDS
+    elif args.get('fields') is not None:
+        fields_dict = {field.name: field for field in FIELDS}
+        fields = [fields_dict[f] for f in args['fields']]
+    else:
+        # default: show only fields that actually appeared in the input
+        fields = []
+        for field in FIELDS:
+            if any(field.name in result for result in results.values()):
+                fields.append(field)
+
+    # find total for every field
+    total = {}
+    for result in results.values():
+        for field in fields:
+            if field.name in result and field.name in total:
+                total[field.name] = field.acc(
+                    [total[field.name], result[field.name]])
+            elif field.name in result:
+                total[field.name] = result[field.name]
+
+    # find previous results?
+    if args.get('diff'):
+        prev_results = co.defaultdict(lambda: {})
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                for result in r:
+                    file = result.pop('file', '')
+                    name = result.pop('name', '')
+                    prev = prev_results[(file, name)]
+                    for field in FIELDS:
+                        try:
+                            r = field.parse(result)
+                            if field.name in prev:
+                                prev_results[(file, name)][field.name] = field.acc(
+                                    [prev[field.name], r])
+                            else:
+                                prev_results[(file, name)][field.name] = r
+                        except (KeyError, ValueError):
+                            pass
+        except FileNotFoundError:
+            pass
+
+        prev_total = {}
+        for result in prev_results.values():
+            for field in fields:
+                if field.name in result and field.name in prev_total:
+                    prev_total[field.name] = field.acc(
+                        [prev_total[field.name], result[field.name]])
+                elif field.name in result:
+                    prev_total[field.name] = result[field.name]
+
+    # print results
+    def dedup_entries(results, by='name'):
+        # collapse (file, name) results down to a single key, merging
+        # duplicates with each field's acc function
+        entries = co.defaultdict(lambda: {})
+        for (file, func), result in results.items():
+            entry = (file if by == 'file' else func)
+            prev = entries[entry]
+            for field in fields:
+                if field.name in result and field.name in prev:
+                    entries[entry][field.name] = field.acc(
+                        [prev[field.name], result[field.name]])
+                elif field.name in result:
+                    entries[entry][field.name] = result[field.name]
+        return entries
+
+    def sorted_entries(entries):
+        # sort by the requested field (descending for --sort, ascending
+        # for --reverse-sort), falling back to name order otherwise
+        if args.get('sort') is not None:
+            field = {field.name: field for field in FIELDS}[args['sort']]
+            return sorted(entries, key=lambda x: (
+                -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
+        elif args.get('reverse_sort') is not None:
+            field = {field.name: field for field in FIELDS}[args['reverse_sort']]
+            return sorted(entries, key=lambda x: (
+                +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
+        else:
+            return sorted(entries)
+
+    def print_header(by=''):
+        # header row; diff mode appends a spacer column for each ratio
+        if not args.get('diff'):
+            print('%-36s' % by, end='')
+            for field in fields:
+                print((' '+field.fmt) % field.name, end='')
+            print()
+        else:
+            print('%-36s' % by, end='')
+            for field in fields:
+                print((' '+field.fmt) % field.name, end='')
+                print(' %-9s' % '', end='')
+            print()
+
+    def print_entry(name, result):
+        # one table row; missing fields render as '-'
+        print('%-36s' % name, end='')
+        for field in fields:
+            r = result.get(field.name)
+            if r is not None:
+                print((' '+field.fmt) % field.repr(r), end='')
+            else:
+                print((' '+field.fmt) % '-', end='')
+        print()
+
+    def print_diff_entry(name, old, new):
+        # one diff row: new value plus percent change vs old
+        print('%-36s' % name, end='')
+        for field in fields:
+            n = new.get(field.name)
+            if n is not None:
+                print((' '+field.fmt) % field.repr(n), end='')
+            else:
+                print((' '+field.fmt) % '-', end='')
+            o = old.get(field.name)
+            # special-case infinities and appearing/disappearing entries
+            ratio = (
+                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
+                else +float('inf') if m.isinf(n or 0)
+                else -float('inf') if m.isinf(o or 0)
+                else 0.0 if not o and not n
+                else +1.0 if not o
+                else -1.0 if not n
+                else field.ratio(o, n))
+            print(' %-9s' % (
+                '' if not ratio
+                else '(+∞%)' if ratio > 0 and m.isinf(ratio)
+                else '(-∞%)' if ratio < 0 and m.isinf(ratio)
+                else '(%+.1f%%)' % (100*ratio)), end='')
+        print()
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, result in sorted_entries(entries.items()):
+                print_entry(name, result)
+        else:
+            # in diff mode only changed entries print, unless --all is given
+            prev_entries = dedup_entries(prev_results, by=by)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for name in entries if name not in prev_entries),
+                sum(1 for name in prev_entries if name not in entries)))
+            for name, result in sorted_entries(entries.items()):
+                if args.get('all') or result != prev_entries.get(name, {}):
+                    print_diff_entry(name, prev_entries.get(name, {}), result)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            print_diff_entry('TOTAL', prev_total, total)
+
+    # -Y: totals only, -F: per-file table, default: per-name table
+    if args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+
+if __name__ == "__main__":
+    # command-line interface; all flags are forwarded to main() as kwargs
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Summarize measurements")
+    # NOTE(review): with nargs='*' the default '-' is a bare string, not a
+    # list; main() happens to work because iterating '-' yields '-'
+    parser.add_argument('csv_paths', nargs='*', default='-',
+        help="Description of where to find *.csv files. May be a directory \
+            or list of paths. *.csv files will be merged to show the total \
+            coverage.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff against.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all objects, not just the ones that changed.")
+    parser.add_argument('-e', '--all-fields', action='store_true',
+        help="Show all fields, even those with no results.")
+    # NOTE(review): prefer a raw string r'\s*,\s*' to avoid
+    # invalid-escape-sequence warnings on newer Pythons
+    parser.add_argument('-f', '--fields', type=lambda x: re.split('\s*,\s*', x),
+        help="Comma separated list of fields to print, by default all fields \
+            that are found in the CSV files are printed.")
+    parser.add_argument('-s', '--sort',
+        help="Sort by this field.")
+    parser.add_argument('-S', '--reverse-sort',
+        help="Sort by this field, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
+        help="Show file-level calls.")
+    parser.add_argument('-Y', '--summary', action='store_true',
+        help="Only show the totals.")
+    sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/scripts/test.py b/components/fs/littlefs/littlefs/scripts/test.py
new file mode 100755
index 00000000..c8196b36
--- /dev/null
+++ b/components/fs/littlefs/littlefs/scripts/test.py
@@ -0,0 +1,860 @@
+#!/usr/bin/env python3
+
+# This script manages littlefs tests, which are configured with
+# .toml files stored in the tests directory.
+#
+
+import toml
+import glob
+import re
+import os
+import io
+import itertools as it
+import collections.abc as abc
+import subprocess as sp
+import base64
+import sys
+import copy
+import shlex
+import pty
+import errno
+import signal
+
+# default directory searched for *.toml test suites
+TEST_PATHS = 'tests'
+# makefile fragment emitted per suite; 4-space indents are converted to
+# tabs before being written out (see TestSuite.build)
+RULES = """
+# add block devices to sources
+TESTSRC ?= $(SRC) $(wildcard bd/*.c)
+
+define FLATTEN
+%(path)s%%$(subst /,.,$(target)): $(target)
+    ./scripts/explode_asserts.py $$< -o $$@
+endef
+$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
+
+-include %(path)s*.d
+.SECONDARY:
+
+%(path)s.test: %(path)s.test.o \\
+    $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
+    $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+# needed in case builddir is different
+%(path)s%%.o: %(path)s%%.c
+    $(CC) -c -MMD $(CFLAGS) $< -o $@
+"""
+# extra makefile rules appended when --coverage is requested; drives lcov
+COVERAGE_RULES = """
+%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
+
+# delete lingering coverage
+%(path)s.test: | %(path)s.info.clean
+.PHONY: %(path)s.info.clean
+%(path)s.info.clean:
+    rm -f %(path)s*.gcda
+
+# accumulate coverage info
+.PHONY: %(path)s.info
+%(path)s.info:
+    $(strip $(LCOV) -c \\
+        $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
+        --rc 'geninfo_adjust_src_path=$(shell pwd)' \\
+        -o $@)
+    $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
+ifdef COVERAGETARGET
+    $(strip $(LCOV) -a $@ \\
+        $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
+        -o $(COVERAGETARGET))
+endif
+"""
+# C preamble prepended to every generated test file
+# NOTE(review): the bare `#include` below looks truncated in this patch
+# (upstream littlefs has `#include <stdio.h>` here) -- confirm against the
+# original file before applying
+GLOBALS = """
+//////////////// AUTOGENERATED TEST ////////////////
+#include "lfs.h"
+#include "bd/lfs_testbd.h"
+#include
+extern const char *lfs_testbd_path;
+extern uint32_t lfs_testbd_cycles;
+"""
+# default geometry/config defines; may be overridden per-suite, per-case,
+# or on the command line with -D
+DEFINES = {
+    'LFS_READ_SIZE': 16,
+    'LFS_PROG_SIZE': 'LFS_READ_SIZE',
+    'LFS_BLOCK_SIZE': 512,
+    'LFS_BLOCK_COUNT': 1024,
+    'LFS_BLOCK_CYCLES': -1,
+    'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
+    'LFS_LOOKAHEAD_SIZE': 16,
+    'LFS_ERASE_VALUE': 0xff,
+    'LFS_ERASE_CYCLES': 0,
+    'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
+}
+# C code injected before each test case's body; declares the common
+# locals (lfs, bd, file, buffer, ...) the cases rely on
+PROLOGUE = """
+    // prologue
+    __attribute__((unused)) lfs_t lfs;
+    __attribute__((unused)) lfs_testbd_t bd;
+    __attribute__((unused)) lfs_file_t file;
+    __attribute__((unused)) lfs_dir_t dir;
+    __attribute__((unused)) struct lfs_info info;
+    __attribute__((unused)) char path[1024];
+    __attribute__((unused)) uint8_t buffer[(1024 > LFS_BLOCK_SIZE * 4) ? (1024) : (LFS_BLOCK_SIZE * 4)];
+    __attribute__((unused)) lfs_size_t size;
+    __attribute__((unused)) int err;
+
+    __attribute__((unused)) const struct lfs_config cfg = {
+        .context = &bd,
+        .read  = lfs_testbd_read,
+        .prog  = lfs_testbd_prog,
+        .erase = lfs_testbd_erase,
+        .sync  = lfs_testbd_sync,
+        .read_size      = LFS_READ_SIZE,
+        .prog_size      = LFS_PROG_SIZE,
+        .block_size     = LFS_BLOCK_SIZE,
+        .block_count    = LFS_BLOCK_COUNT,
+        .block_cycles   = LFS_BLOCK_CYCLES,
+        .cache_size     = LFS_CACHE_SIZE,
+        .lookahead_size = LFS_LOOKAHEAD_SIZE,
+    };
+
+    __attribute__((unused)) const struct lfs_testbd_config bdcfg = {
+        .erase_value        = LFS_ERASE_VALUE,
+        .erase_cycles       = LFS_ERASE_CYCLES,
+        .badblock_behavior  = LFS_BADBLOCK_BEHAVIOR,
+        .power_cycles       = lfs_testbd_cycles,
+    };
+
+    lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
+"""
+# C code injected after each test case's body
+EPILOGUE = """
+    // epilogue
+    lfs_testbd_destroy(&cfg) => 0;
+"""
+# status glyphs: green check / red cross (ANSI colored)
+PASS = '\033[32m✓\033[0m'
+FAIL = '\033[31m✗\033[0m'
+
+class TestFailure(Exception):
+    """Raised when a test permutation fails; carries context for reporting."""
+    def __init__(self, case, returncode=None, stdout=None, assert_=None):
+        self.case = case                # the TestCase permutation that failed
+        self.returncode = returncode    # child process exit code, if any
+        self.stdout = stdout            # captured output lines, if any
+        self.assert_ = assert_          # parsed assert info dict, if any
+
+class TestCase:
+    """One [[case]] entry from a suite's .toml file.
+
+    Created un-permuted by TestSuite, then cloned via permute() once per
+    (class, define-permutation) combination; clones carry a permno and a
+    concrete set of defines.
+    """
+    def __init__(self, config, filter=filter,
+            suite=None, caseno=None, lineno=None, **_):
+        # NOTE(review): the default `filter=filter` binds the *builtin*
+        # filter function, while shouldtest() expects a list of ints; in
+        # this file callers always pass a filter list -- confirm the
+        # default is never actually exercised
+        self.config = config
+        self.filter = filter
+        self.suite = suite
+        self.caseno = caseno
+        self.lineno = lineno
+
+        self.code = config['code']
+        self.code_lineno = config['code_lineno']
+        self.defines = config.get('define', {})
+        self.if_ = config.get('if', None)
+        self.in_ = config.get('in', None)
+
+        self.result = None
+
+    def __str__(self):
+        # permuted cases render as suite#case#perm, plus any defines that
+        # are unique to this permutation
+        if hasattr(self, 'permno'):
+            if any(k not in self.case.defines for k in self.defines):
+                return '%s#%d#%d (%s)' % (
+                    self.suite.name, self.caseno, self.permno, ', '.join(
+                        '%s=%s' % (k, v) for k, v in self.defines.items()
+                        if k not in self.case.defines))
+            else:
+                return '%s#%d#%d' % (
+                    self.suite.name, self.caseno, self.permno)
+        else:
+            return '%s#%d' % (
+                self.suite.name, self.caseno)
+
+    def permute(self, class_=None, defines={}, permno=None, **_):
+        # clone this case (possibly as a different TestCase subclass) with
+        # a concrete set of defines
+        ncase = (class_ or type(self))(self.config)
+        for k, v in self.__dict__.items():
+            setattr(ncase, k, v)
+        ncase.case = self
+        ncase.perms = [ncase]
+        ncase.permno = permno
+        ncase.defines = defines
+        return ncase
+
+    def build(self, f, **_):
+        """Emit this case as a C function into the generated test file f."""
+        # prologue
+        for k, v in sorted(self.defines.items()):
+            if k not in self.suite.defines:
+                f.write('#define %s %s\n' % (k, v))
+
+        # defines that vary between permutations become function parameters
+        f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
+            '\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
+            for k in sorted(self.perms[0].defines)
+            if k not in self.defines)))
+
+        f.write(PROLOGUE)
+        f.write('\n')
+        f.write(4*' '+'// test case %d\n' % self.caseno)
+        # #line keeps compiler diagnostics pointing at the .toml source
+        f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))
+
+        # test case goes here
+        f.write(self.code)
+
+        # epilogue
+        f.write(EPILOGUE)
+        f.write('}\n')
+
+        for k, v in sorted(self.defines.items()):
+            if k not in self.suite.defines:
+                f.write('#undef %s\n' % k)
+
+    def shouldtest(self, **args):
+        """Return True if this permutation should run under current filters."""
+        # filter is [caseno] or [caseno, permno] parsed from "suite#1#2"
+        if (self.filter is not None and
+                len(self.filter) >= 1 and
+                self.filter[0] != self.caseno):
+            return False
+        elif (self.filter is not None and
+                len(self.filter) >= 2 and
+                self.filter[1] != self.permno):
+            return False
+        elif args.get('no_internal') and self.in_ is not None:
+            return False
+        elif self.if_ is not None:
+            # substitute defines into the condition (longest names first,
+            # to avoid partial-name collisions), then translate the C
+            # operators to Python and evaluate
+            if_ = self.if_
+            while True:
+                for k, v in sorted(self.defines.items(),
+                        key=lambda x: len(x[0]), reverse=True):
+                    if k in if_:
+                        if_ = if_.replace(k, '(%s)' % v)
+                        break
+                else:
+                    break
+            if_ = (
+                re.sub('(\&\&|\?)', ' and ',
+                re.sub('(\|\||:)', ' or ',
+                re.sub('!(?!=)', ' not ', if_))))
+            return eval(if_)
+        else:
+            return True
+
+    def test(self, exec=[], persist=False, cycles=None,
+            gdb=False, failure=None, disk=None, **args):
+        """Run this permutation's compiled binary; returns PASS or raises
+        TestFailure."""
+        # build command
+        cmd = exec + ['./%s.test' % self.suite.path,
+            repr(self.caseno), repr(self.permno)]
+
+        # persist disk or keep in RAM for speed?
+        if persist:
+            if not disk:
+                disk = self.suite.path + '.disk'
+            if persist != 'noerase':
+                try:
+                    with open(disk, 'w') as f:
+                        f.truncate(0)
+                    if args.get('verbose'):
+                        print('truncate --size=0', disk)
+                except FileNotFoundError:
+                    pass
+
+            cmd.append(disk)
+
+        # simulate power-loss after n cycles?
+        if cycles:
+            cmd.append(str(cycles))
+
+        # failed? drop into debugger?
+        if gdb and failure:
+            ncmd = ['gdb']
+            if gdb == 'assert':
+                ncmd.extend(['-ex', 'r'])
+                if failure.assert_:
+                    ncmd.extend(['-ex', 'up 2'])
+            elif gdb == 'main':
+                ncmd.extend([
+                    '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
+                    '-ex', 'r'])
+            ncmd.extend(['--args'] + cmd)
+
+            if args.get('verbose'):
+                print(' '.join(shlex.quote(c) for c in ncmd))
+            # hand the terminal over to gdb; ignore our own SIGINT
+            signal.signal(signal.SIGINT, signal.SIG_IGN)
+            sys.exit(sp.call(ncmd))
+
+        # run test case!
+        # use a pty so the child keeps line-buffered/colored output
+        mpty, spty = pty.openpty()
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd, stdout=spty, stderr=spty)
+        os.close(spty)
+        mpty = os.fdopen(mpty, 'r', 1)
+        stdout = []
+        assert_ = None
+        try:
+            while True:
+                try:
+                    line = mpty.readline()
+                except OSError as e:
+                    # EIO means the child closed its end of the pty
+                    if e.errno == errno.EIO:
+                        break
+                    raise
+                if not line:
+                    break;
+                stdout.append(line)
+                if args.get('verbose'):
+                    sys.stdout.write(line)
+                # intercept asserts: match "path:lineno: assert: msg",
+                # tolerating ANSI color escapes in between
+                m = re.match(
+                    '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
+                    .format('(?:\033\[[\d;]*.| )*', 'assert'),
+                    line)
+                if m and assert_ is None:
+                    try:
+                        # grab the offending source line for the report
+                        with open(m.group(1)) as f:
+                            lineno = int(m.group(2))
+                            line = (next(it.islice(f, lineno-1, None))
+                                .strip('\n'))
+                            assert_ = {
+                                'path': m.group(1),
+                                'line': line,
+                                'lineno': lineno,
+                                'message': m.group(3)}
+                    except:
+                        pass
+        except KeyboardInterrupt:
+            raise TestFailure(self, 1, stdout, None)
+        proc.wait()
+
+        # did we pass?
+        if proc.returncode != 0:
+            raise TestFailure(self, proc.returncode, stdout, assert_)
+        else:
+            return PASS
+
+class ValgrindTestCase(TestCase):
+    """TestCase variant that runs the test binary under valgrind."""
+    def __init__(self, config, **args):
+        # cases marked leaky = true are skipped under valgrind
+        self.leaky = config.get('leaky', False)
+        super().__init__(config, **args)
+
+    def shouldtest(self, **args):
+        return not self.leaky and super().shouldtest(**args)
+
+    def test(self, exec=[], **args):
+        verbose = args.get('verbose')
+        # an erase value of -1 leaves bd memory uninitialized, so
+        # undef-value errors are expected in that configuration
+        uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
+        exec = [
+            'valgrind',
+            '--leak-check=full',
+            ] + (['--undef-value-errors=no'] if uninit else []) + [
+            ] + (['--track-origins=yes'] if not uninit else []) + [
+            '--error-exitcode=4',
+            '--error-limit=no',
+            ] + (['--num-callers=1'] if not verbose else []) + [
+            '-q'] + exec
+        return super().test(exec=exec, **args)
+
+class ReentrantTestCase(TestCase):
+    """TestCase variant that reruns the case with simulated power-loss
+    after an increasing number of cycles until it completes."""
+    def __init__(self, config, **args):
+        self.reentrant = config.get('reentrant', False)
+        super().__init__(config, **args)
+
+    def shouldtest(self, **args):
+        return self.reentrant and super().shouldtest(**args)
+
+    def test(self, persist=False, gdb=False, failure=None, **args):
+        for cycles in it.count(1):
+            # clear disk first?
+            if cycles == 1 and persist != 'noerase':
+                persist = 'erase'
+            else:
+                persist = 'noerase'
+
+            # exact cycle we should drop into debugger?
+            if gdb and failure and failure.cycleno == cycles:
+                return super().test(gdb=gdb, persist=persist, cycles=cycles,
+                    failure=failure, **args)
+
+            # run tests, but kill the program after prog/erase has
+            # been hit n cycles. We exit with a special return code if the
+            # program has not finished, since this isn't a test failure.
+            try:
+                return super().test(persist=persist, cycles=cycles, **args)
+            except TestFailure as nfailure:
+                # returncode 33 is that "not finished yet" sentinel; any
+                # other failure is real and is annotated with the cycle
+                if nfailure.returncode == 33:
+                    continue
+                else:
+                    nfailure.cycleno = cycles
+                    raise
+
+class TestSuite:
+    """All test cases from one .toml file, plus the machinery to generate,
+    build, and run them."""
+    def __init__(self, path, classes=[TestCase], defines={},
+            filter=None, **args):
+        self.name = os.path.basename(path)
+        if self.name.endswith('.toml'):
+            self.name = self.name[:-len('.toml')]
+        # self.toml is the source file; self.path is where generated
+        # artifacts go (possibly relocated under --build-dir)
+        if args.get('build_dir'):
+            self.toml = path
+            self.path = args['build_dir'] + '/' + path
+        else:
+            self.toml = path
+            self.path = path
+        self.classes = classes
+        self.defines = defines.copy()
+        self.filter = filter
+
+        with open(self.toml) as f:
+            # load tests
+            config = toml.load(f)
+
+            # find line numbers
+            # (re-scan the raw file since toml.load doesn't report them)
+            f.seek(0)
+            linenos = []
+            code_linenos = []
+            for i, line in enumerate(f):
+                if re.match(r'\[\[\s*case\s*\]\]', line):
+                    linenos.append(i+1)
+                if re.match(r'code\s*=\s*(\'\'\'|""")', line):
+                    code_linenos.append(i+2)
+
+            code_linenos.reverse()
+
+            # grab global config
+            for k, v in config.get('define', {}).items():
+                if k not in self.defines:
+                    self.defines[k] = v
+            self.code = config.get('code', None)
+            if self.code is not None:
+                self.code_lineno = code_linenos.pop()
+
+            # create initial test cases
+            self.cases = []
+            for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
+                # code lineno?
+                if 'code' in case:
+                    case['code_lineno'] = code_linenos.pop()
+                # merge conditions if necessary
+                if 'if' in config and 'if' in case:
+                    case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
+                elif 'if' in config:
+                    case['if'] = config['if']
+                # initialize test case
+                self.cases.append(TestCase(case, filter=filter,
+                    suite=self, caseno=i+1, lineno=lineno, **args))
+
+    def __str__(self):
+        return self.name
+
+    def __lt__(self, other):
+        # suites sort by name for reproducible ordering
+        return self.name < other.name
+
+    def permute(self, **args):
+        """Expand every case into its (class, defines) permutations."""
+        for case in self.cases:
+            # lets find all parameterized definitions, in one of [args.D,
+            # suite.defines, case.defines, DEFINES]. Note that each of these
+            # can be either a dict of defines, or a list of dicts, expressing
+            # an initial set of permutations.
+            pending = [{}]
+            for inits in [self.defines, case.defines, DEFINES]:
+                if not isinstance(inits, list):
+                    inits = [inits]
+
+                npending = []
+                for init, pinit in it.product(inits, pending):
+                    ninit = pinit.copy()
+                    for k, v in init.items():
+                        if k not in ninit:
+                            try:
+                                # evaluate defines where possible so
+                                # iterables (ranges/lists) can be expanded
+                                ninit[k] = eval(v)
+                            except:
+                                ninit[k] = v
+                    npending.append(ninit)
+
+                pending = npending
+
+            # expand permutations
+            # any iterable (non-string) define fans out into one
+            # permutation per element
+            pending = list(reversed(pending))
+            expanded = []
+            while pending:
+                perm = pending.pop()
+                for k, v in sorted(perm.items()):
+                    if not isinstance(v, str) and isinstance(v, abc.Iterable):
+                        for nv in reversed(v):
+                            nperm = perm.copy()
+                            nperm[k] = nv
+                            pending.append(nperm)
+                        break
+                else:
+                    expanded.append(perm)
+
+            # generate permutations
+            case.perms = []
+            for i, (class_, defines) in enumerate(
+                    it.product(self.classes, expanded)):
+                case.perms.append(case.permute(
+                    class_, defines, permno=i+1, **args))
+
+            # also track non-unique defines
+            case.defines = {}
+            for k, v in case.perms[0].defines.items():
+                if all(perm.defines[k] == v for perm in case.perms):
+                    case.defines[k] = v
+
+        # track all perms and non-unique defines
+        self.perms = []
+        for case in self.cases:
+            self.perms.extend(case.perms)
+
+        self.defines = {}
+        for k, v in self.perms[0].defines.items():
+            if all(perm.defines.get(k, None) == v for perm in self.perms):
+                self.defines[k] = v
+
+        return self.perms
+
+    def build(self, **args):
+        """Generate this suite's .tc source files and .mk makefile;
+        returns (makefile, target)."""
+        # build test files
+        tf = open(self.path + '.test.tc', 'w')
+        tf.write(GLOBALS)
+        if self.code is not None:
+            tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
+            tf.write(self.code)
+
+        tfs = {None: tf}
+        for case in self.cases:
+            if case.in_ not in tfs:
+                # cases with in = "file.c" are compiled into a copy of that
+                # file so they can reach internal/static symbols
+                tfs[case.in_] = open(self.path+'.'+
+                    re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
+                tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
+                with open(case.in_) as f:
+                    for line in f:
+                        tfs[case.in_].write(line)
+                tfs[case.in_].write('\n')
+                tfs[case.in_].write(GLOBALS)
+
+            tfs[case.in_].write('\n')
+            case.build(tfs[case.in_], **args)
+
+        # emit the main() driver that dispatches on case/perm arguments
+        tf.write('\n')
+        tf.write('const char *lfs_testbd_path;\n')
+        tf.write('uint32_t lfs_testbd_cycles;\n')
+        tf.write('int main(int argc, char **argv) {\n')
+        tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
+        tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
+        tf.write(4*' '+'lfs_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
+        tf.write(4*' '+'lfs_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
+        for perm in self.perms:
+            # test declaration
+            tf.write(4*' '+'extern void test_case%d(%s);\n' % (
+                perm.caseno, ', '.join(
+                    'intmax_t %s' % k for k in sorted(perm.defines)
+                    if k not in perm.case.defines)))
+            # test call
+            tf.write(4*' '+
+                'if (argc < 3 || (case_ == %d && perm == %d)) {'
+                ' test_case%d(%s); '
+                '}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
+                    str(v) for k, v in sorted(perm.defines.items())
+                    if k not in perm.case.defines)))
+        tf.write('}\n')
+
+        for tf in tfs.values():
+            tf.close()
+
+        # write makefiles
+        with open(self.path + '.mk', 'w') as mk:
+            mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
+            mk.write('\n')
+
+            # add coverage hooks?
+            if args.get('coverage'):
+                mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
+                    path=self.path))
+                mk.write('\n')
+
+            # add truly global defines globally
+            for k, v in sorted(self.defines.items()):
+                mk.write('%s.test: override CFLAGS += -D%s=%r\n'
+                    % (self.path, k, v))
+
+            for path in tfs:
+                if path is None:
+                    mk.write('%s: %s | %s\n' % (
+                        self.path+'.test.c',
+                        self.toml,
+                        self.path+'.test.tc'))
+                else:
+                    mk.write('%s: %s %s | %s\n' % (
+                        self.path+'.'+path.replace('/', '.'),
+                        self.toml,
+                        path,
+                        self.path+'.'+re.sub('(\.c)?$', '.tc',
+                            path.replace('/', '.'))))
+                mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
+
+        self.makefile = self.path + '.mk'
+        self.target = self.path + '.test'
+        return self.makefile, self.target
+
+    def test(self, **args):
+        # run test suite!
+        # non-verbose mode prints one PASS/FAIL glyph per permutation
+        if not args.get('verbose', True):
+            sys.stdout.write(self.name + ' ')
+            sys.stdout.flush()
+        for perm in self.perms:
+            if not perm.shouldtest(**args):
+                continue
+
+            try:
+                result = perm.test(**args)
+            except TestFailure as failure:
+                perm.result = failure
+                if not args.get('verbose', True):
+                    sys.stdout.write(FAIL)
+                    sys.stdout.flush()
+                if not args.get('keep_going'):
+                    if not args.get('verbose', True):
+                        sys.stdout.write('\n')
+                    raise
+            else:
+                perm.result = PASS
+                if not args.get('verbose', True):
+                    sys.stdout.write(PASS)
+                    sys.stdout.flush()
+
+        if not args.get('verbose', True):
+            sys.stdout.write('\n')
+
+def main(**args):
+    """Entry point: discover suites, build them, run them, report results."""
+    # figure out explicit defines
+    defines = {}
+    for define in args['D']:
+        k, v, *_ = define.split('=', 2) + ['']
+        defines[k] = v
+
+    # and what class of TestCase to run
+    classes = []
+    if args.get('normal'):
+        classes.append(TestCase)
+    if args.get('reentrant'):
+        classes.append(ReentrantTestCase)
+    if args.get('valgrind'):
+        classes.append(ValgrindTestCase)
+    if not classes:
+        classes = [TestCase]
+
+    suites = []
+    for testpath in args['test_paths']:
+        # optionally specified test case/perm
+        testpath, *filter = testpath.split('#')
+        filter = [int(f) for f in filter]
+
+        # figure out the suite's toml file
+        if os.path.isdir(testpath):
+            testpath = testpath + '/*.toml'
+        elif os.path.isfile(testpath):
+            testpath = testpath
+        elif testpath.endswith('.toml'):
+            testpath = TEST_PATHS + '/' + testpath
+        else:
+            testpath = TEST_PATHS + '/' + testpath + '.toml'
+
+        # find tests
+        for path in glob.glob(testpath):
+            suites.append(TestSuite(path, classes, defines, filter, **args))
+
+    # sort for reproducibility
+    suites = sorted(suites)
+
+    # generate permutations
+    for suite in suites:
+        suite.permute(**args)
+
+    # build tests in parallel
+    print('====== building ======')
+    makefiles = []
+    targets = []
+    for suite in suites:
+        makefile, target = suite.build(**args)
+        makefiles.append(makefile)
+        targets.append(target)
+
+    cmd = (['make', '-f', 'Makefile'] +
+        list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
+        [target for target in targets])
+    # run make through a pty to keep line-buffered/colored output
+    mpty, spty = pty.openpty()
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd, stdout=spty, stderr=spty)
+    os.close(spty)
+    mpty = os.fdopen(mpty, 'r', 1)
+    stdout = []
+    while True:
+        try:
+            line = mpty.readline()
+        except OSError as e:
+            # EIO means the child closed its end of the pty
+            if e.errno == errno.EIO:
+                break
+            raise
+        if not line:
+            break;
+        stdout.append(line)
+        if args.get('verbose'):
+            sys.stdout.write(line)
+        # intercept warnings: match "path:lineno: warning: msg", tolerating
+        # ANSI color escapes in between
+        m = re.match(
+            '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
+            .format('(?:\033\[[\d;]*.| )*', 'warning'),
+            line)
+        if m and not args.get('verbose'):
+            try:
+                with open(m.group(1)) as f:
+                    lineno = int(m.group(2))
+                    line = next(it.islice(f, lineno-1, None)).strip('\n')
+                    sys.stdout.write(
+                        "\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
+                        "{message}\n{line}\n\n".format(
+                            path=m.group(1), line=line, lineno=lineno,
+                            message=m.group(3)))
+            except:
+                pass
+    proc.wait()
+    if proc.returncode != 0:
+        # on build failure, dump the captured output if it wasn't echoed
+        if not args.get('verbose'):
+            for line in stdout:
+                sys.stdout.write(line)
+        sys.exit(-1)
+
+    print('built %d test suites, %d test cases, %d permutations' % (
+        len(suites),
+        sum(len(suite.cases) for suite in suites),
+        sum(len(suite.perms) for suite in suites)))
+
+    # count the permutations that survive the filters
+    total = 0
+    for suite in suites:
+        for perm in suite.perms:
+            total += perm.shouldtest(**args)
+    if total != sum(len(suite.perms) for suite in suites):
+        print('filtered down to %d permutations' % total)
+
+    # only requested to build?
+    if args.get('build'):
+        return 0
+
+    print('====== testing ======')
+    try:
+        for suite in suites:
+            suite.test(**args)
+    except TestFailure:
+        # failures are recorded on each perm; reported below
+        pass
+
+    print('====== results ======')
+    passed = 0
+    failed = 0
+    for suite in suites:
+        for perm in suite.perms:
+            if perm.result == PASS:
+                passed += 1
+            elif isinstance(perm.result, TestFailure):
+                sys.stdout.write(
+                    "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
+                    "{perm} failed\n".format(
+                        perm=perm, path=perm.suite.path, lineno=perm.lineno,
+                        returncode=perm.result.returncode or 0))
+                if perm.result.stdout:
+                    # drop the assert line itself; it's printed below
+                    if perm.result.assert_:
+                        stdout = perm.result.stdout[:-1]
+                    else:
+                        stdout = perm.result.stdout
+                    for line in stdout[-5:]:
+                        sys.stdout.write(line)
+                if perm.result.assert_:
+                    sys.stdout.write(
+                        "\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
+                        "{message}\n{line}\n".format(
+                            **perm.result.assert_))
+                sys.stdout.write('\n')
+                failed += 1
+
+    if args.get('coverage'):
+        # collect coverage info
+        # why -j1? lcov doesn't work in parallel because of gcov limitations
+        cmd = (['make', '-j1', '-f', 'Makefile'] +
+            list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
+            (['COVERAGETARGET=%s' % args['coverage']]
+                if isinstance(args['coverage'], str) else []) +
+            [suite.path + '.info' for suite in suites
+                if any(perm.result == PASS for perm in suite.perms)])
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE if not args.get('verbose') else None,
+            stderr=sp.STDOUT if not args.get('verbose') else None,
+            universal_newlines=True)
+        stdout = []
+        for line in proc.stdout:
+            stdout.append(line)
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in stdout:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    if args.get('gdb'):
+        # rerun the last failure under gdb, then exit
+        failure = None
+        for suite in suites:
+            for perm in suite.perms:
+                if isinstance(perm.result, TestFailure):
+                    failure = perm.result
+        if failure is not None:
+            print('======= gdb ======')
+            # drop into gdb
+            failure.case.test(failure=failure, **args)
+        sys.exit(0)
+
+    print('tests passed %d/%d (%.1f%%)' % (passed, total,
+        100*(passed/total if total else 1.0)))
+    print('tests failed %d/%d (%.1f%%)' % (failed, total,
+        100*(failed/total if total else 1.0)))
+    return 1 if failed > 0 else 0
+
+if __name__ == "__main__":
+    # command-line interface; all flags are forwarded to main() as kwargs
+    import argparse
+    parser = argparse.ArgumentParser(
+        description="Run parameterized tests in various configurations.")
+    parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
+        help="Description of test(s) to run. By default, this is all tests \
+            found in the \"{0}\" directory. Here, you can specify a different \
+            directory of tests, a specific file, a suite by name, and even \
+            specific test cases and permutations. For example \
+            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
+    parser.add_argument('-D', action='append', default=[],
+        help="Overriding parameter definitions.")
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output everything that is happening.")
+    parser.add_argument('-k', '--keep-going', action='store_true',
+        help="Run all tests instead of stopping on first error. Useful for CI.")
+    parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
+        nargs='?', const='erase',
+        help="Store disk image in a file.")
+    parser.add_argument('-b', '--build', action='store_true',
+        help="Only build the tests, do not execute.")
+    parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
+        nargs='?', const='assert',
+        help="Drop into gdb on test failure.")
+    parser.add_argument('--no-internal', action='store_true',
+        help="Don't run tests that require internal knowledge.")
+    parser.add_argument('-n', '--normal', action='store_true',
+        help="Run tests normally.")
+    parser.add_argument('-r', '--reentrant', action='store_true',
+        help="Run reentrant tests with simulated power-loss.")
+    parser.add_argument('--valgrind', action='store_true',
+        help="Run non-leaky tests under valgrind to check for memory leaks.")
+    parser.add_argument('--exec', default=[], type=lambda e: e.split(),
+        help="Run tests with another executable prefixed on the command line.")
+    parser.add_argument('--disk',
+        help="Specify a file to use for persistent/reentrant tests.")
+    parser.add_argument('--coverage', type=lambda x: x if x else True,
+        nargs='?', const='',
+        help="Collect coverage information during testing. This uses lcov/gcov \
+            to accumulate coverage information into *.info files. May also \
+            a path to a *.info file to accumulate coverage info into.")
+    parser.add_argument('--build-dir',
+        help="Build relative to the specified directory instead of the \
+            current directory.")
+
+    sys.exit(main(**vars(parser.parse_args())))
diff --git a/components/fs/littlefs/littlefs/tests/test_alloc.toml b/components/fs/littlefs/littlefs/tests/test_alloc.toml
new file mode 100644
index 00000000..fa92da51
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_alloc.toml
@@ -0,0 +1,653 @@
+# allocator tests
+# note for these to work there are a number of constraints on the device geometry
+if = 'LFS_BLOCK_CYCLES == -1'
+
+[[case]] # parallel allocation test
+define.FILES = 3
+define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+code = '''
+ const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ lfs_file_t files[FILES];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &files[n], path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ }
+ for (int n = 0; n < FILES; n++) {
+ size = strlen(names[n]);
+ for (lfs_size_t i = 0; i < SIZE; i += size) {
+ lfs_file_write(&lfs, &files[n], names[n], size) => size;
+ }
+ }
+ for (int n = 0; n < FILES; n++) {
+ lfs_file_close(&lfs, &files[n]) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ size = strlen(names[n]);
+ for (lfs_size_t i = 0; i < SIZE; i += size) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, names[n], size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # serial allocation test
+define.FILES = 3
+define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+code = '''
+ const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ for (int n = 0; n < FILES; n++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen(names[n]);
+ memcpy(buffer, names[n], size);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ size = strlen(names[n]);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, names[n], size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # parallel allocation reuse test
+define.FILES = 3
+define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+define.CYCLES = [1, 10]
+code = '''
+ const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+ lfs_file_t files[FILES];
+
+ lfs_format(&lfs, &cfg) => 0;
+
+ for (int c = 0; c < CYCLES; c++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &files[n], path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ }
+ for (int n = 0; n < FILES; n++) {
+ size = strlen(names[n]);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_write(&lfs, &files[n], names[n], size) => size;
+ }
+ }
+ for (int n = 0; n < FILES; n++) {
+ lfs_file_close(&lfs, &files[n]) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ size = strlen(names[n]);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, names[n], size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_remove(&lfs, path) => 0;
+ }
+ lfs_remove(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+'''
+
+[[case]] # serial allocation reuse test
+define.FILES = 3
+define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
+define.CYCLES = [1, 10]
+code = '''
+ const char *names[FILES] = {"bacon", "eggs", "pancakes"};
+
+ lfs_format(&lfs, &cfg) => 0;
+
+ for (int c = 0; c < CYCLES; c++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ for (int n = 0; n < FILES; n++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen(names[n]);
+ memcpy(buffer, names[n], size);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ size = strlen(names[n]);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, names[n], size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int n = 0; n < FILES; n++) {
+ sprintf(path, "breakfast/%s", names[n]);
+ lfs_remove(&lfs, path) => 0;
+ }
+ lfs_remove(&lfs, "breakfast") => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+'''
+
+[[case]] # exhaustion test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ size = strlen("exhaustion");
+ memcpy(buffer, "exhaustion", size);
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ lfs_ssize_t res;
+ while (true) {
+ res = lfs_file_write(&lfs, &file, buffer, size);
+ if (res < 0) {
+ break;
+ }
+
+ res => size;
+ }
+ res => LFS_ERR_NOSPC;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
+ size = strlen("exhaustion");
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "exhaustion", size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # exhaustion wraparound test
+define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
+ size = strlen("buffering");
+ memcpy(buffer, "buffering", size);
+ for (int i = 0; i < SIZE; i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_remove(&lfs, "padding") => 0;
+
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ size = strlen("exhaustion");
+ memcpy(buffer, "exhaustion", size);
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_sync(&lfs, &file) => 0;
+
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ lfs_ssize_t res;
+ while (true) {
+ res = lfs_file_write(&lfs, &file, buffer, size);
+ if (res < 0) {
+ break;
+ }
+
+ res => size;
+ }
+ res => LFS_ERR_NOSPC;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
+ size = strlen("exhaustion");
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "exhaustion", size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_remove(&lfs, "exhaustion") => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # dir exhaustion test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // find out max file size
+ lfs_mkdir(&lfs, "exhaustiondir") => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ int count = 0;
+ while (true) {
+ err = lfs_file_write(&lfs, &file, buffer, size);
+ if (err < 0) {
+ break;
+ }
+
+ count += 1;
+ }
+ err => LFS_ERR_NOSPC;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "exhaustion") => 0;
+ lfs_remove(&lfs, "exhaustiondir") => 0;
+
+ // see if dir fits with max file size
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ for (int i = 0; i < count; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_mkdir(&lfs, "exhaustiondir") => 0;
+ lfs_remove(&lfs, "exhaustiondir") => 0;
+ lfs_remove(&lfs, "exhaustion") => 0;
+
+ // see if dir fits with > max file size
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ for (int i = 0; i < count+1; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
+
+ lfs_remove(&lfs, "exhaustion") => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # what if we have a bad block during an allocation scan?
+in = "lfs.c"
+define.LFS_ERASE_CYCLES = 0xffffffff
+define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ // first fill to exhaustion to find available space
+ lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ strcpy((char*)buffer, "waka");
+ size = strlen("waka");
+ lfs_size_t filesize = 0;
+ while (true) {
+ lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
+ assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
+ if (res == LFS_ERR_NOSPC) {
+ break;
+ }
+ filesize += size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ // now fill all but a couple of blocks of the filesystem with data
+ filesize -= 3*LFS_BLOCK_SIZE;
+ lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ strcpy((char*)buffer, "waka");
+ size = strlen("waka");
+ for (lfs_size_t i = 0; i < filesize/size; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ // also save head of file so we can error during lookahead scan
+ lfs_block_t fileblock = file.ctz.head;
+ lfs_unmount(&lfs) => 0;
+
+ // remount to force an alloc scan
+ lfs_mount(&lfs, &cfg) => 0;
+
+    // but mark the head of our file as a "bad block", this will force our
+ // scan to bail early
+ lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
+ lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ strcpy((char*)buffer, "chomp");
+ size = strlen("chomp");
+ while (true) {
+ lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
+ assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT);
+ if (res == LFS_ERR_CORRUPT) {
+ break;
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // now reverse the "bad block" and try to write the file again until we
+ // run out of space
+ lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
+ lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ strcpy((char*)buffer, "chomp");
+ size = strlen("chomp");
+ while (true) {
+ lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
+ assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
+ if (res == LFS_ERR_NOSPC) {
+ break;
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+
+ // check that the disk isn't hurt
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
+ strcpy((char*)buffer, "waka");
+ size = strlen("waka");
+ for (lfs_size_t i = 0; i < filesize/size; i++) {
+ uint8_t rbuffer[4];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ assert(memcmp(rbuffer, buffer, size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+
+# Below, I don't like these tests. They're fragile and depend _heavily_
+# on the geometry of the block device. But they are valuable. Eventually they
+# should be removed and replaced with generalized tests.
+
+[[case]] # chained dir exhaustion test
+define.LFS_BLOCK_SIZE = 512
+define.LFS_BLOCK_COUNT = 1024
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // find out max file size
+ lfs_mkdir(&lfs, "exhaustiondir") => 0;
+ for (int i = 0; i < 10; i++) {
+ sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ int count = 0;
+ while (true) {
+ err = lfs_file_write(&lfs, &file, buffer, size);
+ if (err < 0) {
+ break;
+ }
+
+ count += 1;
+ }
+ err => LFS_ERR_NOSPC;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "exhaustion") => 0;
+ lfs_remove(&lfs, "exhaustiondir") => 0;
+ for (int i = 0; i < 10; i++) {
+ sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+
+ // see that chained dir fails
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ for (int i = 0; i < count+1; i++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_sync(&lfs, &file) => 0;
+
+ for (int i = 0; i < 10; i++) {
+ sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+
+ lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
+
+ // shorten file to try a second chained dir
+ while (true) {
+ err = lfs_mkdir(&lfs, "exhaustiondir");
+ if (err != LFS_ERR_NOSPC) {
+ break;
+ }
+
+ lfs_ssize_t filesize = lfs_file_size(&lfs, &file);
+ filesize > 0 => true;
+
+ lfs_file_truncate(&lfs, &file, filesize - size) => 0;
+ lfs_file_sync(&lfs, &file) => 0;
+ }
+ err => 0;
+
+ lfs_mkdir(&lfs, "exhaustiondir2") => LFS_ERR_NOSPC;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # split dir test
+define.LFS_BLOCK_SIZE = 512
+define.LFS_BLOCK_COUNT = 1024
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // create one block hole for half a directory
+ lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
+ memcpy(&buffer[i], "hi", 2);
+ }
+ lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < (cfg.block_count-4)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // remount to force reset of lookahead
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // open hole
+ lfs_remove(&lfs, "bump") => 0;
+
+ lfs_mkdir(&lfs, "splitdir") => 0;
+ lfs_file_open(&lfs, &file, "splitdir/bump",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
+ memcpy(&buffer[i], "hi", 2);
+ }
+ lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # outdated lookahead test
+define.LFS_BLOCK_SIZE = 512
+define.LFS_BLOCK_COUNT = 1024
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // fill completely with two files
+ lfs_file_open(&lfs, &file, "exhaustion1",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "exhaustion2",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // remount to force reset of lookahead
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // rewrite one file
+ lfs_file_open(&lfs, &file, "exhaustion1",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_sync(&lfs, &file) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+    // rewrite second file, this requires that the lookahead does not
+    // reuse the old population
+ lfs_file_open(&lfs, &file, "exhaustion2",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_sync(&lfs, &file) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # outdated lookahead and split dir test
+define.LFS_BLOCK_SIZE = 512
+define.LFS_BLOCK_COUNT = 1024
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // fill completely with two files
+ lfs_file_open(&lfs, &file, "exhaustion1",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "exhaustion2",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // remount to force reset of lookahead
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // rewrite one file with a hole of one block
+ lfs_file_open(&lfs, &file, "exhaustion1",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_sync(&lfs, &file) => 0;
+ size = strlen("blahblahblahblah");
+ memcpy(buffer, "blahblahblahblah", size);
+ for (lfs_size_t i = 0;
+ i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
+ i += size) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ // try to allocate a directory, should fail!
+ lfs_mkdir(&lfs, "split") => LFS_ERR_NOSPC;
+
+ // file should not fail
+ lfs_file_open(&lfs, &file, "notasplit",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hi", 2) => 2;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_attrs.toml b/components/fs/littlefs/littlefs/tests/test_attrs.toml
new file mode 100644
index 00000000..db8d0c7e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_attrs.toml
@@ -0,0 +1,304 @@
+[[case]] # set/get attribute
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
+ lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
+ lfs_setattr(&lfs, "hello", 'C', "ccccc", 5) => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "bbbbbb", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "hello", 'B', "", 0) => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 0;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_removeattr(&lfs, "hello", 'B') => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "hello", 'B', "dddddd", 6) => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "dddddd", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "hello", 'B', "eee", 3) => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 3;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "eee\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "hello", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
+ lfs_setattr(&lfs, "hello", 'B', "fffffffff", 9) => 0;
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 9;
+ lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
+
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
+ lfs_getattr(&lfs, "hello", 'C', buffer+13, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "fffffffff", 9) => 0;
+ memcmp(buffer+13, "ccccc", 5) => 0;
+
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
+ memcmp(buffer, "hello", strlen("hello")) => 0;
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # set/get root attribute
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
+ lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
+ lfs_setattr(&lfs, "/", 'C', "ccccc", 5) => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "bbbbbb", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "/", 'B', "", 0) => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 0;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_removeattr(&lfs, "/", 'B') => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "/", 'B', "dddddd", 6) => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "dddddd", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "/", 'B', "eee", 3) => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 3;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "eee\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ lfs_setattr(&lfs, "/", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
+ lfs_setattr(&lfs, "/", 'B', "fffffffff", 9) => 0;
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 9;
+ lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
+ lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
+ lfs_getattr(&lfs, "/", 'C', buffer+13, 5) => 5;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "fffffffff", 9) => 0;
+ memcmp(buffer+13, "ccccc", 5) => 0;
+
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
+ memcmp(buffer, "hello", strlen("hello")) => 0;
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # set/get file attribute
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ struct lfs_attr attrs1[] = {
+ {'A', buffer, 4},
+ {'B', buffer+4, 6},
+ {'C', buffer+10, 5},
+ };
+ struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
+
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
+ memcpy(buffer, "aaaa", 4);
+ memcpy(buffer+4, "bbbbbb", 6);
+ memcpy(buffer+10, "ccccc", 5);
+ lfs_file_close(&lfs, &file) => 0;
+ memset(buffer, 0, 15);
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "bbbbbb", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ attrs1[1].size = 0;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memset(buffer, 0, 15);
+ attrs1[1].size = 6;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ attrs1[1].size = 6;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
+ memcpy(buffer+4, "dddddd", 6);
+ lfs_file_close(&lfs, &file) => 0;
+ memset(buffer, 0, 15);
+ attrs1[1].size = 6;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "dddddd", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ attrs1[1].size = 3;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
+ memcpy(buffer+4, "eee", 3);
+ lfs_file_close(&lfs, &file) => 0;
+ memset(buffer, 0, 15);
+ attrs1[1].size = 6;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "eee\0\0\0", 6) => 0;
+ memcmp(buffer+10, "ccccc", 5) => 0;
+
+ attrs1[0].size = LFS_ATTR_MAX+1;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1)
+ => LFS_ERR_NOSPC;
+
+ struct lfs_attr attrs2[] = {
+ {'A', buffer, 4},
+ {'B', buffer+4, 9},
+ {'C', buffer+13, 5},
+ };
+ struct lfs_file_config cfg2 = {.attrs=attrs2, .attr_count=3};
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDWR, &cfg2) => 0;
+ memcpy(buffer+4, "fffffffff", 9);
+ lfs_file_close(&lfs, &file) => 0;
+ attrs1[0].size = 4;
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(buffer, 0, sizeof(buffer));
+ struct lfs_attr attrs3[] = {
+ {'A', buffer, 4},
+ {'B', buffer+4, 9},
+ {'C', buffer+13, 5},
+ };
+ struct lfs_file_config cfg3 = {.attrs=attrs3, .attr_count=3};
+
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg3) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ memcmp(buffer, "aaaa", 4) => 0;
+ memcmp(buffer+4, "fffffffff", 9) => 0;
+ memcmp(buffer+13, "ccccc", 5) => 0;
+
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
+ memcmp(buffer, "hello", strlen("hello")) => 0;
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # deferred file attributes
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hello") => 0;
+ lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
+ lfs_file_close(&lfs, &file);
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
+ lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
+
+ memset(buffer, 0, sizeof(buffer));
+ struct lfs_attr attrs1[] = {
+ {'B', "gggg", 4},
+ {'C', "", 0},
+ {'D', "hhhh", 4},
+ };
+ struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
+
+ lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
+
+ lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 9;
+ lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 5;
+ lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => LFS_ERR_NOATTR;
+ memcmp(buffer, "fffffffff", 9) => 0;
+ memcmp(buffer+9, "ccccc\0\0\0\0", 9) => 0;
+ memcmp(buffer+18, "\0\0\0\0\0\0\0\0\0", 9) => 0;
+
+ lfs_file_sync(&lfs, &file) => 0;
+ lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 4;
+ lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 0;
+ lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => 4;
+ memcmp(buffer, "gggg\0\0\0\0\0", 9) => 0;
+ memcmp(buffer+9, "\0\0\0\0\0\0\0\0\0", 9) => 0;
+ memcmp(buffer+18, "hhhh\0\0\0\0\0", 9) => 0;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_badblocks.toml b/components/fs/littlefs/littlefs/tests/test_badblocks.toml
new file mode 100644
index 00000000..06967a67
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_badblocks.toml
@@ -0,0 +1,241 @@
+# bad blocks with block cycles should be tested in test_relocations
+if = 'LFS_BLOCK_CYCLES == -1'
+
+[[case]] # single bad blocks
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_ERASE_CYCLES = 0xffffffff
+define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
+define.LFS_BADBLOCK_BEHAVIOR = [
+ 'LFS_TESTBD_BADBLOCK_PROGERROR',
+ 'LFS_TESTBD_BADBLOCK_ERASEERROR',
+ 'LFS_TESTBD_BADBLOCK_READERROR',
+ 'LFS_TESTBD_BADBLOCK_PROGNOOP',
+ 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+define.NAMEMULT = 64
+define.FILEMULT = 1
+code = '''
+ for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
+ lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
+ lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
+
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_mkdir(&lfs, (char*)buffer) => 0;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer,
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_stat(&lfs, (char*)buffer, &info) => 0;
+ info.type => LFS_TYPE_DIR;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ uint8_t rbuffer[1024];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(buffer, rbuffer, size) => 0;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+ }
+'''
+
+[[case]] # region corruption (causes cascading failures)
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_ERASE_CYCLES = 0xffffffff
+define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
+define.LFS_BADBLOCK_BEHAVIOR = [
+ 'LFS_TESTBD_BADBLOCK_PROGERROR',
+ 'LFS_TESTBD_BADBLOCK_ERASEERROR',
+ 'LFS_TESTBD_BADBLOCK_READERROR',
+ 'LFS_TESTBD_BADBLOCK_PROGNOOP',
+ 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+define.NAMEMULT = 64
+define.FILEMULT = 1
+code = '''
+ for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
+ lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
+ }
+
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_mkdir(&lfs, (char*)buffer) => 0;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer,
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_stat(&lfs, (char*)buffer, &info) => 0;
+ info.type => LFS_TYPE_DIR;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ uint8_t rbuffer[1024];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(buffer, rbuffer, size) => 0;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # alternating corruption (causes cascading failures)
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_ERASE_CYCLES = 0xffffffff
+define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
+define.LFS_BADBLOCK_BEHAVIOR = [
+ 'LFS_TESTBD_BADBLOCK_PROGERROR',
+ 'LFS_TESTBD_BADBLOCK_ERASEERROR',
+ 'LFS_TESTBD_BADBLOCK_READERROR',
+ 'LFS_TESTBD_BADBLOCK_PROGNOOP',
+ 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+define.NAMEMULT = 64
+define.FILEMULT = 1
+code = '''
+ for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
+ lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
+ }
+
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_mkdir(&lfs, (char*)buffer) => 0;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer,
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 1; i < 10; i++) {
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j] = '0'+i;
+ }
+ buffer[NAMEMULT] = '\0';
+ lfs_stat(&lfs, (char*)buffer, &info) => 0;
+ info.type => LFS_TYPE_DIR;
+
+ buffer[NAMEMULT] = '/';
+ for (int j = 0; j < NAMEMULT; j++) {
+ buffer[j+NAMEMULT+1] = '0'+i;
+ }
+ buffer[2*NAMEMULT+1] = '\0';
+ lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
+
+ size = NAMEMULT;
+ for (int j = 0; j < i*FILEMULT; j++) {
+ uint8_t rbuffer[1024];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(buffer, rbuffer, size) => 0;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+# other corner cases
+[[case]] # bad superblocks (corrupt 1 or 0)
+define.LFS_ERASE_CYCLES = 0xffffffff
+define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
+define.LFS_BADBLOCK_BEHAVIOR = [
+ 'LFS_TESTBD_BADBLOCK_PROGERROR',
+ 'LFS_TESTBD_BADBLOCK_ERASEERROR',
+ 'LFS_TESTBD_BADBLOCK_READERROR',
+ 'LFS_TESTBD_BADBLOCK_PROGNOOP',
+ 'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+code = '''
+ lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
+
+ lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
+ lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_dirs.toml b/components/fs/littlefs/littlefs/tests/test_dirs.toml
new file mode 100644
index 00000000..270f4f8e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_dirs.toml
@@ -0,0 +1,838 @@
+[[case]] # root
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many directory creation
+define.N = 'range(0, 100, 3)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "dir%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "dir%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many directory removal
+define.N = 'range(3, 100, 11)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many directory rename
+define.N = 'range(3, 100, 11)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "test%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "test%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ char oldpath[128];
+ char newpath[128];
+ sprintf(oldpath, "test%03d", i);
+ sprintf(newpath, "tedd%03d", i);
+ lfs_rename(&lfs, oldpath, newpath) => 0;
+ }
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "tedd%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant many directory creation/rename/removal
+define.N = [5, 11]
+reentrant = true
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hi%03d", i);
+ err = lfs_mkdir(&lfs, path);
+ assert(err == 0 || err == LFS_ERR_EXIST);
+ }
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ err = lfs_remove(&lfs, path);
+ assert(err == 0 || err == LFS_ERR_NOENT);
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hi%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ for (int i = 0; i < N; i++) {
+ char oldpath[128];
+ char newpath[128];
+ sprintf(oldpath, "hi%03d", i);
+ sprintf(newpath, "hello%03d", i);
+ // YES this can overwrite an existing newpath
+ lfs_rename(&lfs, oldpath, newpath) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # file creation
+define.N = 'range(3, 100, 11)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "file%03d", i);
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "file%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # file removal
+define.N = 'range(0, 100, 3)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "removeme%03d", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # file rename
+define.N = 'range(0, 100, 3)'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "test%03d", i);
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "test%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < N; i++) {
+ char oldpath[128];
+ char newpath[128];
+ sprintf(oldpath, "test%03d", i);
+ sprintf(newpath, "tedd%03d", i);
+ lfs_rename(&lfs, oldpath, newpath) => 0;
+ }
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "tedd%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant file creation/rename/removal
+define.N = [5, 25]
+reentrant = true
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hi%03d", i);
+ lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ err = lfs_remove(&lfs, path);
+ assert(err == 0 || err == LFS_ERR_NOENT);
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hi%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ for (int i = 0; i < N; i++) {
+ char oldpath[128];
+ char newpath[128];
+ sprintf(oldpath, "hi%03d", i);
+ sprintf(newpath, "hello%03d", i);
+ // YES this can overwrite an existing newpath
+ lfs_rename(&lfs, oldpath, newpath) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "hello%03d", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # nested directories
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "potato") => 0;
+ lfs_file_open(&lfs, &file, "burito",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "potato/baked") => 0;
+ lfs_mkdir(&lfs, "potato/sweet") => 0;
+ lfs_mkdir(&lfs, "potato/fried") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "potato") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "baked") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "fried") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "sweet") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // try removing?
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
+ lfs_unmount(&lfs) => 0;
+
+ // try renaming?
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "potato", "coldpotato") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
+ lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_unmount(&lfs) => 0;
+
+ // try cross-directory renaming
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "coldpotato") => 0;
+ lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
+ lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_rename(&lfs, "hotpotato/fried", "coldpotato/fried") => 0;
+ lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_rename(&lfs, "hotpotato/sweet", "coldpotato/sweet") => 0;
+ lfs_rename(&lfs, "coldpotato", "hotpotato") => 0;
+ lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "baked") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "fried") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "sweet") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // final remove
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "hotpotato/baked") => 0;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "hotpotato/fried") => 0;
+ lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
+ lfs_remove(&lfs, "hotpotato/sweet") => 0;
+ lfs_remove(&lfs, "hotpotato") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ info.type => LFS_TYPE_DIR;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "burito") == 0);
+ info.type => LFS_TYPE_REG;
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # recursive remove
+define.N = [10, 100]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "prickly-pear") => 0;
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "prickly-pear/cactus%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "cactus%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+    lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
+
+ lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ for (int i = 0; i < N; i++) {
+ sprintf(path, "cactus%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, path) == 0);
+ sprintf(path, "prickly-pear/%s", info.name);
+ lfs_remove(&lfs, path) => 0;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_remove(&lfs, "prickly-pear") => 0;
+ lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # other error cases
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "potato") => 0;
+ lfs_file_open(&lfs, &file, "burito",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+
+ lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
+ lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
+ lfs_file_open(&lfs, &file, "burito",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
+ lfs_file_open(&lfs, &file, "potato",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
+ lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
+ lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "potato", LFS_O_RDONLY) => LFS_ERR_ISDIR;
+ lfs_file_open(&lfs, &file, "tomato", LFS_O_WRONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "potato", LFS_O_WRONLY) => LFS_ERR_ISDIR;
+ lfs_file_open(&lfs, &file, "potato",
+ LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
+
+ lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
+ lfs_file_open(&lfs, &file, "/",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
+ lfs_file_open(&lfs, &file, "/", LFS_O_RDONLY) => LFS_ERR_ISDIR;
+ lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY) => LFS_ERR_ISDIR;
+ lfs_file_open(&lfs, &file, "/",
+ LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
+
+ // check that errors did not corrupt directory
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, "burito") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "potato") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_unmount(&lfs) => 0;
+
+ // or on disk
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, ".") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "..") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_REG);
+ assert(strcmp(info.name, "burito") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(info.type == LFS_TYPE_DIR);
+ assert(strcmp(info.name, "potato") == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # directory seek
+define.COUNT = [4, 128, 132]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hello") => 0;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "hello/kitty%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ for (int j = 2; j < COUNT; j++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "hello") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_soff_t pos;
+ for (int i = 0; i < j; i++) {
+ sprintf(path, "kitty%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
+ }
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ sprintf(path, "kitty%03d", j);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ sprintf(path, "kitty%03d", 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ sprintf(path, "kitty%03d", j);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+'''
+
+[[case]] # root seek
+define.COUNT = [4, 128, 132]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "hi%03d", i);
+ lfs_mkdir(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+
+ for (int j = 2; j < COUNT; j++) {
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_soff_t pos;
+ for (int i = 0; i < j; i++) {
+ sprintf(path, "hi%03d", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ pos = lfs_dir_tell(&lfs, &dir);
+ assert(pos >= 0);
+ }
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ sprintf(path, "hi%03d", j);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ sprintf(path, "hi%03d", 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_seek(&lfs, &dir, pos) => 0;
+ sprintf(path, "hi%03d", j);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, path) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ }
+'''
+
diff --git a/components/fs/littlefs/littlefs/tests/test_entries.toml b/components/fs/littlefs/littlefs/tests/test_entries.toml
new file mode 100644
index 00000000..81e175f5
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_entries.toml
@@ -0,0 +1,611 @@
+# These tests are for some specific corner cases with neighboring inline files.
+# Note that these tests are intended for 512 byte inline sizes. They should
+# still pass with other inline sizes but wouldn't be testing anything.
+
+define.LFS_CACHE_SIZE = 512
+if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
+
+[[case]] # entry grow test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 20
+ sprintf(path, "hi0"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 20
+ sprintf(path, "hi2"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 20
+ sprintf(path, "hi3"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi0 20
+ sprintf(path, "hi0"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 20
+ sprintf(path, "hi2"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 20
+ sprintf(path, "hi3"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # entry shrink test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 20
+ sprintf(path, "hi0"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 20
+ sprintf(path, "hi2"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 20
+ sprintf(path, "hi3"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi0 20
+ sprintf(path, "hi0"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 20
+ sprintf(path, "hi2"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 20
+ sprintf(path, "hi3"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # entry spill test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # entry push spill test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # entry push spill two test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi4 200
+ sprintf(path, "hi4"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi1 20
+ sprintf(path, "hi1"); size = 20;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi4 200
+ sprintf(path, "hi4"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # entry drop test
+code = '''
+ uint8_t wbuffer[1024];
+ uint8_t rbuffer[1024];
+
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // write hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi1 200
+ sprintf(path, "hi1"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+ // write hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "hi1") => 0;
+ lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi2 200
+ sprintf(path, "hi2"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "hi2") => 0;
+ lfs_stat(&lfs, "hi2", &info) => LFS_ERR_NOENT;
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ // read hi3 200
+ sprintf(path, "hi3"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "hi3") => 0;
+ lfs_stat(&lfs, "hi3", &info) => LFS_ERR_NOENT;
+ // read hi0 200
+ sprintf(path, "hi0"); size = 200;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_size(&lfs, &file) => size;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_remove(&lfs, "hi0") => 0;
+ lfs_stat(&lfs, "hi0", &info) => LFS_ERR_NOENT;
+
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # create too big
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(path, 'm', 200);
+ path[200] = '\0';
+
+ size = 400;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ uint8_t wbuffer[1024];
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ size = 400;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ uint8_t rbuffer[1024];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # resize too big
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ memset(path, 'm', 200);
+ path[200] = '\0';
+
+ size = 40;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ uint8_t wbuffer[1024];
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ size = 40;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ uint8_t rbuffer[1024];
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ size = 400;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+ memset(wbuffer, 'c', size);
+ lfs_file_write(&lfs, &file, wbuffer, size) => size;
+ lfs_file_close(&lfs, &file) => 0;
+
+ size = 400;
+ lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, rbuffer, size) => size;
+ memcmp(rbuffer, wbuffer, size) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_evil.toml b/components/fs/littlefs/littlefs/tests/test_evil.toml
new file mode 100644
index 00000000..920d3a0e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_evil.toml
@@ -0,0 +1,288 @@
+# Tests for recovering from conditions which shouldn't
+# happen during normal operation of littlefs
+
+# invalid pointer tests (outside of block_count)
+
+[[case]] # invalid tail-pointer test
+define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
+define.INVALSET = [0x3, 0x1, 0x2]
+in = "lfs.c"
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+
+ // change tail-pointer to invalid pointers
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
+ (lfs_block_t[2]){
+ (INVALSET & 0x1) ? 0xcccccccc : 0,
+ (INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that mount fails gracefully
+ lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
+
+[[case]] # invalid dir pointer test
+define.INVALSET = [0x3, 0x1, 0x2]
+in = "lfs.c"
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+ // make a dir
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "dir_here") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // change the dir pointer to be invalid
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ // make sure id 1 == our directory
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
+ => LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here"));
+ assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0);
+ // change dir pointer
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8),
+ (lfs_block_t[2]){
+ (INVALSET & 0x1) ? 0xcccccccc : 0,
+ (INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that accessing our bad dir fails, note there's a number
+ // of ways to access the dir, some can fail, but some don't
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "dir_here", &info) => 0;
+ assert(strcmp(info.name, "dir_here") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
+ lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
+ lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
+ lfs_file_open(&lfs, &file, "dir_here/file_here",
+ LFS_O_RDONLY) => LFS_ERR_CORRUPT;
+ lfs_file_open(&lfs, &file, "dir_here/file_here",
+ LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # invalid file pointer test
+in = "lfs.c"
+define.SIZE = [10, 1000, 100000] # faked file size
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+ // make a file
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "file_here",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // change the file pointer to be invalid
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ // make sure id 1 == our file
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
+ => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
+ assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
+ // change file pointer
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)),
+ &(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that accessing our bad file fails, note there's a number
+    // of ways to access the file, some can fail, but some don't
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "file_here", &info) => 0;
+ assert(strcmp(info.name, "file_here") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == SIZE);
+
+ lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
+ lfs_file_close(&lfs, &file) => 0;
+
+    // any allocs that traverse the CTZ skip-list must unfortunately fail
+ if (SIZE > 2*LFS_BLOCK_SIZE) {
+ lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # invalid pointer in CTZ skip-list test
+define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
+in = "lfs.c"
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+ // make a file
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "file_here",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ for (int i = 0; i < SIZE; i++) {
+ char c = 'c';
+ lfs_file_write(&lfs, &file, &c, 1) => 1;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+ // change pointer in CTZ skip-list to be invalid
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ // make sure id 1 == our file and get our CTZ structure
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
+ => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
+ assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
+ struct lfs_ctz ctz;
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x700, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
+ => LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
+ lfs_ctz_fromle32(&ctz);
+ // rewrite block to contain bad pointer
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ uint32_t bad = lfs_tole32(0xcccccccc);
+ memcpy(&bbuffer[0], &bad, sizeof(bad));
+ memcpy(&bbuffer[4], &bad, sizeof(bad));
+ cfg.erase(&cfg, ctz.head) => 0;
+ cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that accessing our bad file fails, note there's a number
+    // of ways to access the file, some can fail, but some don't
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "file_here", &info) => 0;
+ assert(strcmp(info.name, "file_here") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == SIZE);
+
+ lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
+ lfs_file_close(&lfs, &file) => 0;
+
+    // any allocs that traverse the CTZ skip-list must unfortunately fail
+ if (SIZE > 2*LFS_BLOCK_SIZE) {
+ lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+
+[[case]] # invalid gstate pointer
+define.INVALSET = [0x3, 0x1, 0x2]
+in = "lfs.c"
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+
+ // create an invalid gstate
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
+ (INVALSET & 0x1) ? 0xcccccccc : 0,
+ (INVALSET & 0x2) ? 0xcccccccc : 0});
+ lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
+ lfs_deinit(&lfs) => 0;
+
+    // test that the invalid gstate is handled gracefully
+ // mount may not fail, but our first alloc should fail when
+ // we try to fix the gstate
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# cycle detection/recovery tests
+
+[[case]] # metadata-pair threaded-list loop test
+in = "lfs.c"
+code = '''
+ // create littlefs
+ lfs_format(&lfs, &cfg) => 0;
+
+ // change tail-pointer to point to ourself
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
+ (lfs_block_t[2]){0, 1}})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that mount fails gracefully
+ lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
+
+[[case]] # metadata-pair threaded-list 2-length loop test
+in = "lfs.c"
+code = '''
+ // create littlefs with child dir
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "child") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // find child
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_block_t pair[2];
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
+ => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
+ lfs_pair_fromle32(pair);
+ // change tail-pointer to point to root
+ lfs_dir_fetch(&lfs, &mdir, pair) => 0;
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
+ (lfs_block_t[2]){0, 1}})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that mount fails gracefully
+ lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
+
+[[case]] # metadata-pair threaded-list 1-length child loop test
+in = "lfs.c"
+code = '''
+ // create littlefs with child dir
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "child") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // find child
+ lfs_init(&lfs, &cfg) => 0;
+ lfs_mdir_t mdir;
+ lfs_block_t pair[2];
+ lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
+ lfs_dir_get(&lfs, &mdir,
+ LFS_MKTAG(0x7ff, 0x3ff, 0),
+ LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
+ => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
+ lfs_pair_fromle32(pair);
+ // change tail-pointer to point to ourself
+ lfs_dir_fetch(&lfs, &mdir, pair) => 0;
+ lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
+ {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0;
+ lfs_deinit(&lfs) => 0;
+
+ // test that mount fails gracefully
+ lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_exhaustion.toml b/components/fs/littlefs/littlefs/tests/test_exhaustion.toml
new file mode 100644
index 00000000..569611c5
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_exhaustion.toml
@@ -0,0 +1,465 @@
+[[case]] # test running a filesystem to exhaustion: write until NOSPC, then verify existing files still stat
+define.LFS_ERASE_CYCLES = 10  # blocks go bad after only 10 erases
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'  # force relocation at half the erase budget
+define.LFS_BADBLOCK_BEHAVIOR = [  # run once per failure mode of a worn block
+    'LFS_TESTBD_BADBLOCK_PROGERROR',
+    'LFS_TESTBD_BADBLOCK_ERASEERROR',
+    'LFS_TESTBD_BADBLOCK_READERROR',
+    'LFS_TESTBD_BADBLOCK_PROGNOOP',
+    'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+define.FILES = 10  # files rewritten each cycle
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_mkdir(&lfs, "roadrunner") => 0;
+    lfs_unmount(&lfs) => 0;
+
+    uint32_t cycle = 0;
+    while (true) {
+        lfs_mount(&lfs, &cfg) => 0;
+        for (uint32_t i = 0; i < FILES; i++) {
+            // chose name, roughly random seed, and random 2^n size
+            sprintf(path, "roadrunner/test%d", i);
+            srand(cycle * i);
+            size = 1 << ((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path,
+                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
+                assert(res == 1 || res == LFS_ERR_NOSPC);
+                if (res == LFS_ERR_NOSPC) {
+                    err = lfs_file_close(&lfs, &file);
+                    assert(err == 0 || err == LFS_ERR_NOSPC);
+                    lfs_unmount(&lfs) => 0;
+                    goto exhausted;
+                }
+            }
+
+            err = lfs_file_close(&lfs, &file);
+            assert(err == 0 || err == LFS_ERR_NOSPC);
+            if (err == LFS_ERR_NOSPC) {
+                lfs_unmount(&lfs) => 0;
+                goto exhausted;
+            }
+        }
+
+        for (uint32_t i = 0; i < FILES; i++) {
+            // check for errors
+            sprintf(path, "roadrunner/test%d", i);
+            srand(cycle * i);
+            size = 1 << ((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                char r;
+                lfs_file_read(&lfs, &file, &r, 1) => 1;
+                assert(r == c);
+            }
+
+            lfs_file_close(&lfs, &file) => 0;
+        }
+        lfs_unmount(&lfs) => 0;
+
+        cycle += 1;
+    }
+
+exhausted:
+    // should still be readable
+    lfs_mount(&lfs, &cfg) => 0;
+    for (uint32_t i = 0; i < FILES; i++) {
+        // check for errors
+        sprintf(path, "roadrunner/test%d", i);
+        lfs_stat(&lfs, path, &info) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+
+    LFS_WARN("completed %d cycles", cycle);
+'''
+
+[[case]] # test running a filesystem to exhaustion
+         # which also requires expanding superblocks (files live in root, so the root metadata pair keeps growing)
+define.LFS_ERASE_CYCLES = 10  # blocks go bad after only 10 erases
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'  # force relocation at half the erase budget
+define.LFS_BADBLOCK_BEHAVIOR = [  # run once per failure mode of a worn block
+    'LFS_TESTBD_BADBLOCK_PROGERROR',
+    'LFS_TESTBD_BADBLOCK_ERASEERROR',
+    'LFS_TESTBD_BADBLOCK_READERROR',
+    'LFS_TESTBD_BADBLOCK_PROGNOOP',
+    'LFS_TESTBD_BADBLOCK_ERASENOOP',
+]
+define.FILES = 10  # files rewritten each cycle
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+
+    uint32_t cycle = 0;
+    while (true) {
+        lfs_mount(&lfs, &cfg) => 0;
+        for (uint32_t i = 0; i < FILES; i++) {
+            // chose name, roughly random seed, and random 2^n size
+            sprintf(path, "test%d", i);
+            srand(cycle * i);
+            size = 1 << ((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path,
+                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
+                assert(res == 1 || res == LFS_ERR_NOSPC);
+                if (res == LFS_ERR_NOSPC) {
+                    err = lfs_file_close(&lfs, &file);
+                    assert(err == 0 || err == LFS_ERR_NOSPC);
+                    lfs_unmount(&lfs) => 0;
+                    goto exhausted;
+                }
+            }
+
+            err = lfs_file_close(&lfs, &file);
+            assert(err == 0 || err == LFS_ERR_NOSPC);
+            if (err == LFS_ERR_NOSPC) {
+                lfs_unmount(&lfs) => 0;
+                goto exhausted;
+            }
+        }
+
+        for (uint32_t i = 0; i < FILES; i++) {
+            // check for errors
+            sprintf(path, "test%d", i);
+            srand(cycle * i);
+            size = 1 << ((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                char r;
+                lfs_file_read(&lfs, &file, &r, 1) => 1;
+                assert(r == c);
+            }
+
+            lfs_file_close(&lfs, &file) => 0;
+        }
+        lfs_unmount(&lfs) => 0;
+
+        cycle += 1;
+    }
+
+exhausted:
+    // should still be readable
+    lfs_mount(&lfs, &cfg) => 0;
+    for (uint32_t i = 0; i < FILES; i++) {
+        // check for errors
+        sprintf(path, "test%d", i);
+        lfs_stat(&lfs, path, &info) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+
+    LFS_WARN("completed %d cycles", cycle);
+'''
+
+# These are a sort of high-level litmus test for wear-leveling. One definition
+# of wear-leveling is that increasing a block device's space translates directly
+# into increasing the block device's lifetime. This is something we can actually
+# check for.
+
+[[case]] # wear-level test running a filesystem to exhaustion: run once with half the blocks pre-worn-out, once with all blocks fresh, and require the full device to last ~2x as long
+define.LFS_ERASE_CYCLES = 20  # blocks go bad after only 20 erases
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'  # force relocation at half the erase budget
+define.FILES = 10  # files rewritten each cycle
+code = '''
+    uint32_t run_cycles[2];
+    const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
+
+    for (int run = 0; run < 2; run++) {
+        for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
+            lfs_testbd_setwear(&cfg, b,
+                    (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
+        }
+
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+        lfs_mkdir(&lfs, "roadrunner") => 0;
+        lfs_unmount(&lfs) => 0;
+
+        uint32_t cycle = 0;
+        while (true) {
+            lfs_mount(&lfs, &cfg) => 0;
+            for (uint32_t i = 0; i < FILES; i++) {
+                // chose name, roughly random seed, and random 2^n size
+                sprintf(path, "roadrunner/test%d", i);
+                srand(cycle * i);
+                size = 1 << ((rand() % 10)+2);
+
+                lfs_file_open(&lfs, &file, path,
+                        LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+                for (lfs_size_t j = 0; j < size; j++) {
+                    char c = 'a' + (rand() % 26);
+                    lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
+                    assert(res == 1 || res == LFS_ERR_NOSPC);
+                    if (res == LFS_ERR_NOSPC) {
+                        err = lfs_file_close(&lfs, &file);
+                        assert(err == 0 || err == LFS_ERR_NOSPC);
+                        lfs_unmount(&lfs) => 0;
+                        goto exhausted;
+                    }
+                }
+
+                err = lfs_file_close(&lfs, &file);
+                assert(err == 0 || err == LFS_ERR_NOSPC);
+                if (err == LFS_ERR_NOSPC) {
+                    lfs_unmount(&lfs) => 0;
+                    goto exhausted;
+                }
+            }
+
+            for (uint32_t i = 0; i < FILES; i++) {
+                // check for errors
+                sprintf(path, "roadrunner/test%d", i);
+                srand(cycle * i);
+                size = 1 << ((rand() % 10)+2);
+
+                lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+                for (lfs_size_t j = 0; j < size; j++) {
+                    char c = 'a' + (rand() % 26);
+                    char r;
+                    lfs_file_read(&lfs, &file, &r, 1) => 1;
+                    assert(r == c);
+                }
+
+                lfs_file_close(&lfs, &file) => 0;
+            }
+            lfs_unmount(&lfs) => 0;
+
+            cycle += 1;
+        }
+
+exhausted:
+        // should still be readable
+        lfs_mount(&lfs, &cfg) => 0;
+        for (uint32_t i = 0; i < FILES; i++) {
+            // check for errors
+            sprintf(path, "roadrunner/test%d", i);
+            lfs_stat(&lfs, path, &info) => 0;
+        }
+        lfs_unmount(&lfs) => 0;
+
+        run_cycles[run] = cycle;
+        LFS_WARN("completed %d blocks %d cycles",
+                run_block_count[run], run_cycles[run]);
+    }
+
+    // check we increased the lifetime by 2x with ~10% error
+    LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
+'''
+
+[[case]] # wear-level test + expanding superblock: same 2x-lifetime check, but files live in root so superblock expansion is also exercised
+define.LFS_ERASE_CYCLES = 20  # blocks go bad after only 20 erases
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'  # force relocation at half the erase budget
+define.FILES = 10  # files rewritten each cycle
+code = '''
+    uint32_t run_cycles[2];
+    const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
+
+    for (int run = 0; run < 2; run++) {
+        for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
+            lfs_testbd_setwear(&cfg, b,
+                    (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
+        }
+
+        lfs_format(&lfs, &cfg) => 0;
+
+        uint32_t cycle = 0;
+        while (true) {
+            lfs_mount(&lfs, &cfg) => 0;
+            for (uint32_t i = 0; i < FILES; i++) {
+                // chose name, roughly random seed, and random 2^n size
+                sprintf(path, "test%d", i);
+                srand(cycle * i);
+                size = 1 << ((rand() % 10)+2);
+
+                lfs_file_open(&lfs, &file, path,
+                        LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+                for (lfs_size_t j = 0; j < size; j++) {
+                    char c = 'a' + (rand() % 26);
+                    lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
+                    assert(res == 1 || res == LFS_ERR_NOSPC);
+                    if (res == LFS_ERR_NOSPC) {
+                        err = lfs_file_close(&lfs, &file);
+                        assert(err == 0 || err == LFS_ERR_NOSPC);
+                        lfs_unmount(&lfs) => 0;
+                        goto exhausted;
+                    }
+                }
+
+                err = lfs_file_close(&lfs, &file);
+                assert(err == 0 || err == LFS_ERR_NOSPC);
+                if (err == LFS_ERR_NOSPC) {
+                    lfs_unmount(&lfs) => 0;
+                    goto exhausted;
+                }
+            }
+
+            for (uint32_t i = 0; i < FILES; i++) {
+                // check for errors
+                sprintf(path, "test%d", i);
+                srand(cycle * i);
+                size = 1 << ((rand() % 10)+2);
+
+                lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+                for (lfs_size_t j = 0; j < size; j++) {
+                    char c = 'a' + (rand() % 26);
+                    char r;
+                    lfs_file_read(&lfs, &file, &r, 1) => 1;
+                    assert(r == c);
+                }
+
+                lfs_file_close(&lfs, &file) => 0;
+            }
+            lfs_unmount(&lfs) => 0;
+
+            cycle += 1;
+        }
+
+exhausted:
+        // should still be readable
+        lfs_mount(&lfs, &cfg) => 0;
+        for (uint32_t i = 0; i < FILES; i++) {
+            // check for errors
+            sprintf(path, "test%d", i);
+            lfs_stat(&lfs, path, &info) => 0;
+        }
+        lfs_unmount(&lfs) => 0;
+
+        run_cycles[run] = cycle;
+        LFS_WARN("completed %d blocks %d cycles",
+                run_block_count[run], run_cycles[run]);
+    }
+
+    // check we increased the lifetime by 2x with ~10% error
+    LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
+'''
+
+[[case]] # test that we wear blocks roughly evenly: bounded relative variance of per-block wear
+define.LFS_ERASE_CYCLES = 0xffffffff  # effectively infinite — blocks never fail, we only measure wear
+define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
+define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]  # parameterized relocation frequency
+define.CYCLES = 100  # fixed number of write cycles
+define.FILES = 10  # files rewritten each cycle
+if = 'LFS_BLOCK_CYCLES < CYCLES/10'  # only meaningful when relocation happens often enough
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_mkdir(&lfs, "roadrunner") => 0;
+    lfs_unmount(&lfs) => 0;
+
+    uint32_t cycle = 0;
+    while (cycle < CYCLES) {
+        lfs_mount(&lfs, &cfg) => 0;
+        for (uint32_t i = 0; i < FILES; i++) {
+            // chose name, roughly random seed, and random 2^n size
+            sprintf(path, "roadrunner/test%d", i);
+            srand(cycle * i);
+            size = 1 << 4; //((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path,
+                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
+                assert(res == 1 || res == LFS_ERR_NOSPC);
+                if (res == LFS_ERR_NOSPC) {
+                    err = lfs_file_close(&lfs, &file);
+                    assert(err == 0 || err == LFS_ERR_NOSPC);
+                    lfs_unmount(&lfs) => 0;
+                    goto exhausted;
+                }
+            }
+
+            err = lfs_file_close(&lfs, &file);
+            assert(err == 0 || err == LFS_ERR_NOSPC);
+            if (err == LFS_ERR_NOSPC) {
+                lfs_unmount(&lfs) => 0;
+                goto exhausted;
+            }
+        }
+
+        for (uint32_t i = 0; i < FILES; i++) {
+            // check for errors
+            sprintf(path, "roadrunner/test%d", i);
+            srand(cycle * i);
+            size = 1 << 4; //((rand() % 10)+2);
+
+            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+            for (lfs_size_t j = 0; j < size; j++) {
+                char c = 'a' + (rand() % 26);
+                char r;
+                lfs_file_read(&lfs, &file, &r, 1) => 1;
+                assert(r == c);
+            }
+
+            lfs_file_close(&lfs, &file) => 0;
+        }
+        lfs_unmount(&lfs) => 0;
+
+        cycle += 1;
+    }
+
+exhausted:
+    // should still be readable
+    lfs_mount(&lfs, &cfg) => 0;
+    for (uint32_t i = 0; i < FILES; i++) {
+        // check for errors
+        sprintf(path, "roadrunner/test%d", i);
+        lfs_stat(&lfs, path, &info) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+
+    LFS_WARN("completed %d cycles", cycle);
+
+    // check the wear on our block device
+    lfs_testbd_wear_t minwear = -1;
+    lfs_testbd_wear_t totalwear = 0;
+    lfs_testbd_wear_t maxwear = 0;
+    // skip 0 and 1 as superblock movement is intentionally avoided
+    for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
+        lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
+        printf("%08x: wear %d\n", b, wear);
+        assert(wear >= 0);
+        if (wear < minwear) {
+            minwear = wear;
+        }
+        if (wear > maxwear) {
+            maxwear = wear;
+        }
+        totalwear += wear;
+    }
+    // average over the LFS_BLOCK_COUNT-2 blocks actually measured above
+    lfs_testbd_wear_t avgwear = totalwear / (LFS_BLOCK_COUNT-2);
+    LFS_WARN("max wear: %d cycles", maxwear);
+    LFS_WARN("avg wear: %d cycles", avgwear);
+    LFS_WARN("min wear: %d cycles", minwear);
+
+    // find standard deviation^2
+    lfs_testbd_wear_t dev2 = 0;
+    for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
+        lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
+        assert(wear >= 0);
+        lfs_testbd_swear_t diff = wear - avgwear;
+        dev2 += diff*diff;
+    }
+    dev2 /= totalwear;
+    LFS_WARN("std dev^2: %d", dev2);
+    assert(dev2 < 8);
+'''
+
diff --git a/components/fs/littlefs/littlefs/tests/test_files.toml b/components/fs/littlefs/littlefs/tests/test_files.toml
new file mode 100644
index 00000000..565e665b
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_files.toml
@@ -0,0 +1,486 @@
+
+[[case]] # simple file test: write one string, remount, read it back
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "hello",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    size = strlen("Hello World!")+1;
+    strcpy((char*)buffer, "Hello World!");
+    lfs_file_write(&lfs, &file, buffer, size) => size;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
+    lfs_file_read(&lfs, &file, buffer, size) => size;
+    assert(strcmp((char*)buffer, "Hello World!") == 0);
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # larger files: write SIZE pseudorandom bytes in CHUNKSIZE chunks, remount, verify byte-for-byte against the same srand(1) stream
+define.SIZE = [32, 8192, 262144, 0, 7, 8193]  # parameterized file size, includes empty and off-by-one-from-2^n sizes
+define.CHUNKSIZE = [31, 16, 33, 1, 1023]  # parameterized write/read chunk size
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+
+    // write
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # rewriting files: overwrite a SIZE1 file in place with SIZE2 bytes (no truncate), expect size max(SIZE1, SIZE2) with old tail preserved when SIZE1 > SIZE2
+define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]  # initial file size
+define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]  # rewrite size
+define.CHUNKSIZE = [31, 16, 1]  # write/read chunk size
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+
+    // write
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE1;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // rewrite
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    if (SIZE1 > SIZE2) {
+        srand(1);
+        for (lfs_size_t b = 0; b < SIZE2; b++) {
+            rand();
+        }
+        for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
+            lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+            lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+            for (lfs_size_t b = 0; b < chunk; b++) {
+                assert(buffer[b] == (rand() & 0xff));
+            }
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # appending files: append SIZE2 bytes with LFS_O_APPEND, expect size SIZE1+SIZE2 with both halves intact
+define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]  # initial file size
+define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]  # appended size
+define.CHUNKSIZE = [31, 16, 1]  # write/read chunk size
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+
+    // write
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE1;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // append
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # truncating files: reopen with LFS_O_TRUNC and write SIZE2 bytes, expect exactly SIZE2 remaining
+define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]  # initial file size
+define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]  # size after truncate + rewrite
+define.CHUNKSIZE = [31, 16, 1]  # write/read chunk size
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+
+    // write
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE1;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // truncate
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // read
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE2;
+    srand(2);
+    for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant file writing: under injected power-loss the file must be either absent, empty, or full size — never partial
+define.SIZE = [32, 0, 7, 2049]  # parameterized file size
+define.CHUNKSIZE = [31, 16, 65]  # write/read chunk size
+reentrant = true  # runner re-enters this code after each simulated power loss
+code = '''
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+
+    err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
+    assert(err == LFS_ERR_NOENT || err == 0);
+    if (err == 0) {
+        // can only be 0 (new file) or full size
+        size = lfs_file_size(&lfs, &file);
+        assert(size == 0 || size == SIZE);
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    // write
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+
+    // read
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant file writing with syncs: sync after each chunk, so after power loss any prefix-consistent size is allowed
+define = [  # parameterized open mode; smaller SIZE for the O(n^2) modes
+    # append (O(n))
+    {MODE='LFS_O_APPEND', SIZE=[32, 0, 7, 2049], CHUNKSIZE=[31, 16, 65]},
+    # truncate (O(n^2))
+    {MODE='LFS_O_TRUNC', SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
+    # rewrite (O(n^2))
+    {MODE=0, SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
+]
+reentrant = true  # runner re-enters this code after each simulated power loss
+code = '''
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+
+    err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
+    assert(err == LFS_ERR_NOENT || err == 0);
+    if (err == 0) {
+        // with syncs we could be any size, but it at least must be valid data
+        size = lfs_file_size(&lfs, &file);
+        assert(size <= SIZE);
+        srand(1);
+        for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
+            lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
+            lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+            for (lfs_size_t b = 0; b < chunk; b++) {
+                assert(buffer[b] == (rand() & 0xff));
+            }
+        }
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    // write
+    lfs_file_open(&lfs, &file, "avacado",
+            LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
+    size = lfs_file_size(&lfs, &file);
+    assert(size <= SIZE);
+    srand(1);
+    lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
+    for (lfs_size_t b = 0; b < skip; b++) {
+        rand();
+    }
+    for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            buffer[b] = rand() & 0xff;
+        }
+        lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
+        lfs_file_sync(&lfs, &file) => 0;
+    }
+    lfs_file_close(&lfs, &file) => 0;
+
+    // read
+    lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => SIZE;
+    srand(1);
+    for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
+        lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
+        lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
+        for (lfs_size_t b = 0; b < chunk; b++) {
+            assert(buffer[b] == (rand() & 0xff));
+        }
+    }
+    lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many files: create N small files in one mount, verifying each immediately after creation
+define.N = 300  # number of files
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    // create N files of 7 bytes
+    lfs_mount(&lfs, &cfg) => 0;
+    for (int i = 0; i < N; i++) {
+        sprintf(path, "file_%03d", i);
+        lfs_file_open(&lfs, &file, path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        char wbuffer[1024];
+        size = 7;
+        snprintf(wbuffer, size, "Hi %03d", i);
+        lfs_file_write(&lfs, &file, wbuffer, size) => size;
+        lfs_file_close(&lfs, &file) => 0;
+
+        char rbuffer[1024];
+        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+        lfs_file_read(&lfs, &file, rbuffer, size) => size;
+        assert(strcmp(rbuffer, wbuffer) == 0);
+        lfs_file_close(&lfs, &file) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many files with power cycle: unmount/remount between write and read-back of each file
+define.N = 300  # number of files
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    // create N files of 7 bytes
+    lfs_mount(&lfs, &cfg) => 0;
+    for (int i = 0; i < N; i++) {
+        sprintf(path, "file_%03d", i);
+        lfs_file_open(&lfs, &file, path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        char wbuffer[1024];
+        size = 7;
+        snprintf(wbuffer, size, "Hi %03d", i);
+        lfs_file_write(&lfs, &file, wbuffer, size) => size;
+        lfs_file_close(&lfs, &file) => 0;
+        lfs_unmount(&lfs) => 0;
+
+        char rbuffer[1024];
+        lfs_mount(&lfs, &cfg) => 0;
+        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+        lfs_file_read(&lfs, &file, rbuffer, size) => size;
+        assert(strcmp(rbuffer, wbuffer) == 0);
+        lfs_file_close(&lfs, &file) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # many files with power loss: reentrant variant — skip the write if the file already reached full size on a previous attempt
+define.N = 300  # number of files
+reentrant = true  # runner re-enters this code after each simulated power loss
+code = '''
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+    // create N files of 7 bytes
+    for (int i = 0; i < N; i++) {
+        sprintf(path, "file_%03d", i);
+        err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
+        char wbuffer[1024];
+        size = 7;
+        snprintf(wbuffer, size, "Hi %03d", i);
+        if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
+            lfs_file_write(&lfs, &file, wbuffer, size) => size;
+        }
+        lfs_file_close(&lfs, &file) => 0;
+
+        char rbuffer[1024];
+        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+        lfs_file_read(&lfs, &file, rbuffer, size) => size;
+        assert(strcmp(rbuffer, wbuffer) == 0);
+        lfs_file_close(&lfs, &file) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_interspersed.toml b/components/fs/littlefs/littlefs/tests/test_interspersed.toml
new file mode 100644
index 00000000..87a05780
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_interspersed.toml
@@ -0,0 +1,244 @@
+
+[[case]] # interspersed file test: keep FILES files open at once and round-robin 1-byte writes across them
+define.SIZE = [10, 100]  # bytes written per file
+define.FILES = [4, 10, 26]  # simultaneously-open files (one per letter)
+code = '''
+    lfs_file_t files[FILES];
+    const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_file_open(&lfs, &files[j], path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    }
+
+    for (int i = 0; i < SIZE; i++) {
+        for (int j = 0; j < FILES; j++) {
+            lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
+        }
+    }
+
+    for (int j = 0; j < FILES; j++) {
+        lfs_file_close(&lfs, &files[j]);
+    }
+
+    lfs_dir_open(&lfs, &dir, "/") => 0;
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, ".") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "..") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_dir_read(&lfs, &dir, &info) => 1;
+        assert(strcmp(info.name, path) == 0);
+        assert(info.type == LFS_TYPE_REG);
+        assert(info.size == SIZE);
+    }
+    lfs_dir_read(&lfs, &dir, &info) => 0;
+    lfs_dir_close(&lfs, &dir) => 0;
+
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
+    }
+
+    for (int i = 0; i < 10; i++) {
+        for (int j = 0; j < FILES; j++) {
+            lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
+            assert(buffer[0] == alphas[j]);
+        }
+    }
+
+    for (int j = 0; j < FILES; j++) {
+        lfs_file_close(&lfs, &files[j]);
+    }
+
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # interspersed remove file test: remove files one at a time while an unrelated open file ("zzz") is being written and synced
+define.SIZE = [10, 100]  # bytes written per file
+define.FILES = [4, 10, 26]  # files to create and then remove
+code = '''
+    const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_file_open(&lfs, &file, path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        for (int i = 0; i < SIZE; i++) {
+            lfs_file_write(&lfs, &file, &alphas[j], 1) => 1;
+        }
+        lfs_file_close(&lfs, &file);
+    }
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+    for (int j = 0; j < FILES; j++) {
+        lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
+        lfs_file_sync(&lfs, &file) => 0;
+
+        sprintf(path, "%c", alphas[j]);
+        lfs_remove(&lfs, path) => 0;
+    }
+    lfs_file_close(&lfs, &file);
+
+    lfs_dir_open(&lfs, &dir, "/") => 0;
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, ".") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "..") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "zzz") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    assert(info.size == FILES);
+    lfs_dir_read(&lfs, &dir, &info) => 0;
+    lfs_dir_close(&lfs, &dir) => 0;
+
+    lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
+    for (int i = 0; i < FILES; i++) {
+        lfs_file_read(&lfs, &file, buffer, 1) => 1;
+        assert(buffer[0] == '~');
+    }
+    lfs_file_close(&lfs, &file);
+
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # remove inconveniently test: remove "f" while it and its siblings are still open, then keep writing through all three handles
+define.SIZE = [10, 100]  # total bytes written per file (two halves of SIZE/2)
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_t files[3];
+    lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+    lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+    lfs_file_open(&lfs, &files[2], "g", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+    for (int i = 0; i < SIZE/2; i++) {
+        lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
+        lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
+        lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
+    }
+
+    lfs_remove(&lfs, "f") => 0;
+
+    for (int i = 0; i < SIZE/2; i++) {
+        lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
+        lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
+        lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
+    }
+
+    lfs_file_close(&lfs, &files[0]);
+    lfs_file_close(&lfs, &files[1]);
+    lfs_file_close(&lfs, &files[2]);
+
+    lfs_dir_open(&lfs, &dir, "/") => 0;
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, ".") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "..") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "e") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    assert(info.size == SIZE);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "g") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    assert(info.size == SIZE);
+    lfs_dir_read(&lfs, &dir, &info) => 0;
+    lfs_dir_close(&lfs, &dir) => 0;
+
+    lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
+    lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
+    for (int i = 0; i < SIZE; i++) {
+        lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
+        assert(buffer[0] == 'e');
+        lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
+        assert(buffer[0] == 'g');
+    }
+    lfs_file_close(&lfs, &files[0]);
+    lfs_file_close(&lfs, &files[1]);
+
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant interspersed file test: append-mode round-robin writes with sync, skipping bytes already persisted before the power loss
+define.SIZE = [10, 100]  # bytes written per file
+define.FILES = [4, 10, 26]  # simultaneously-open files (one per letter)
+reentrant = true  # runner re-enters this code after each simulated power loss
+code = '''
+    lfs_file_t files[FILES];
+    const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
+
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_file_open(&lfs, &files[j], path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+    }
+
+    for (int i = 0; i < SIZE; i++) {
+        for (int j = 0; j < FILES; j++) {
+            size = lfs_file_size(&lfs, &files[j]);
+            assert((int)size >= 0);
+            if ((int)size <= i) {
+                lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
+                lfs_file_sync(&lfs, &files[j]) => 0;
+            }
+        }
+    }
+
+    for (int j = 0; j < FILES; j++) {
+        lfs_file_close(&lfs, &files[j]);
+    }
+
+    lfs_dir_open(&lfs, &dir, "/") => 0;
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, ".") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    lfs_dir_read(&lfs, &dir, &info) => 1;
+    assert(strcmp(info.name, "..") == 0);
+    assert(info.type == LFS_TYPE_DIR);
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_dir_read(&lfs, &dir, &info) => 1;
+        assert(strcmp(info.name, path) == 0);
+        assert(info.type == LFS_TYPE_REG);
+        assert(info.size == SIZE);
+    }
+    lfs_dir_read(&lfs, &dir, &info) => 0;
+    lfs_dir_close(&lfs, &dir) => 0;
+
+    for (int j = 0; j < FILES; j++) {
+        sprintf(path, "%c", alphas[j]);
+        lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
+    }
+
+    for (int i = 0; i < 10; i++) {
+        for (int j = 0; j < FILES; j++) {
+            lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
+            assert(buffer[0] == alphas[j]);
+        }
+    }
+
+    for (int j = 0; j < FILES; j++) {
+        lfs_file_close(&lfs, &files[j]);
+    }
+
+    lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_move.toml b/components/fs/littlefs/littlefs/tests/test_move.toml
new file mode 100644
index 00000000..bb3b713f
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_move.toml
@@ -0,0 +1,1815 @@
+# Basic cross-directory rename: create a/hello with known contents, rename
+# it to c/hello, then verify "a" is empty, "c" holds the file, and the
+# file's contents survived the move byte-for-byte.
+[[case]] # move file
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // perform the move in its own mount cycle
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // "a" must now contain only "." and ".."
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ // "c" must contain the moved file with its full size (5+8+6 bytes)
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // file is only reachable at its new path, and contents are unchanged
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Renaming a path onto itself is a legal no-op at every nesting depth;
+# afterwards the entry must still exist and stat with the expected name/type.
+[[case]] # noop move, yes this is legal
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "hi") => 0;
+ lfs_rename(&lfs, "hi", "hi") => 0;
+ lfs_mkdir(&lfs, "hi/hi") => 0;
+ lfs_rename(&lfs, "hi/hi", "hi/hi") => 0;
+ lfs_mkdir(&lfs, "hi/hi/hi") => 0;
+ lfs_rename(&lfs, "hi/hi/hi", "hi/hi/hi") => 0;
+ // deepest entry survives the self-renames
+ lfs_stat(&lfs, "hi/hi/hi", &info) => 0;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Rename a/hello -> c/hello, then deliberately corrupt the SOURCE
+# directory's metadata block. The move must still be observed as complete
+# on remount: "a" empty, "c" holds the file with intact contents.
+[[case]] # move file corrupt source
+in = "lfs.c"
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ // grab the first block of "a"'s metadata pair to corrupt below
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // scan back over erased bytes to find the last programmed byte,
+ // then stomp the trailing 3 bytes and reprogram the block
+ // (memset's fill value is truncated to a byte; presumably any value
+ // that differs from the original data works here)
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // after remount: "a" empty, "c" holds the file — move still completed
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Rename a/hello -> c/hello, then corrupt BOTH the source and destination
+# directories' metadata blocks. Per the asserts below, this rolls the move
+# back: the file reappears in "a" (contents intact) and "c" is empty.
+[[case]] # move file corrupt source and dest
+in = "lfs.c"
+if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // scan back to the last programmed byte, stomp the trailing 3 bytes,
+ // then erase and reprogram the block with the damaged image
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // corrupt the destination
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // after remount: the file is back in "a" and "c" is empty
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // contents are still intact at the rolled-back path
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Same double-corruption setup as the previous case (move rolled back),
+# but then the rename a/hello -> c/hello is retried. The retry must
+# succeed: "a" ends empty and "c" holds the file with intact contents.
+[[case]] # move file after corrupt
+in = "lfs.c"
+if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // stomp the last 3 programmed bytes of the block and rewrite it
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // corrupt the destination
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // continue move
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // after the retry: "a" empty, "c" holds the file
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Reentrant (power-loss) move chain: under the test harness's reentrant
+# mode this body is re-entered after simulated power losses. It walks
+# hello through a -> b -> c -> d one hop per iteration, asserting the
+# invariant that at most one copy of "hello" exists at any point, then
+# verifies the final state in "d".
+[[case]] # simple reentrant move file
+reentrant = true
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+ // dirs may already exist from a previous incarnation of this test body
+ err = lfs_mkdir(&lfs, "a");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "b");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "c");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "d");
+ assert(!err || err == LFS_ERR_EXIST);
+ lfs_unmount(&lfs) => 0;
+
+ while (true) {
+ lfs_mount(&lfs, &cfg) => 0;
+ // there should never exist _2_ hello files
+ // (a/hello may be size 0 if power was lost mid-create)
+ int count = 0;
+ if (lfs_stat(&lfs, "a/hello", &info) == 0) {
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6 || info.size == 0);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "b/hello", &info) == 0) {
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "c/hello", &info) == 0) {
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "d/hello", &info) == 0) {
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ count += 1;
+ }
+ assert(count <= 1);
+ lfs_unmount(&lfs) => 0;
+
+ // advance the file one hop along a -> b -> c -> d, or (re)create it
+ lfs_mount(&lfs, &cfg) => 0;
+ if (lfs_stat(&lfs, "a/hello", &info) == 0 && info.size > 0) {
+ lfs_rename(&lfs, "a/hello", "b/hello") => 0;
+ } else if (lfs_stat(&lfs, "b/hello", &info) == 0) {
+ lfs_rename(&lfs, "b/hello", "c/hello") => 0;
+ } else if (lfs_stat(&lfs, "c/hello", &info) == 0) {
+ lfs_rename(&lfs, "c/hello", "d/hello") => 0;
+ } else if (lfs_stat(&lfs, "d/hello", &info) == 0) {
+ // success
+ lfs_unmount(&lfs) => 0;
+ break;
+ } else {
+ // create file
+ lfs_file_open(&lfs, &file, "a/hello",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+ }
+
+ // final state: "a" empty, "d" holds hello with full contents
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "d") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hello") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 5+8+6);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Directory rename: move a/hi (which contains three subdirectories) to
+# c/hi, then verify "a" is empty, c/hi exists, and hi's children
+# (listed in sorted order: bonjour, hola, ohayo) came along with it.
+[[case]] # move dir
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_mkdir(&lfs, "a/hi") => 0;
+ lfs_mkdir(&lfs, "a/hi/hola") => 0;
+ lfs_mkdir(&lfs, "a/hi/bonjour") => 0;
+ lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hi", "c/hi") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // "a" must be empty, "c" must contain "hi"
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // "hi" is only reachable at its new path, children intact
+ lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "c/hi") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "bonjour") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hola") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "ohayo") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "d/hi") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Directory-move variant of "move file corrupt source": rename a/hi to
+# c/hi, corrupt the SOURCE directory's metadata block, and verify the
+# move is still observed as complete on remount with all children intact.
[[case]] gets replaced below; see header comment.
+[[case]] # move dir corrupt source
+in = "lfs.c"
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_mkdir(&lfs, "a/hi") => 0;
+ lfs_mkdir(&lfs, "a/hi/hola") => 0;
+ lfs_mkdir(&lfs, "a/hi/bonjour") => 0;
+ lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hi", "c/hi") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // stomp the last 3 programmed bytes of the block and rewrite it
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // after remount: "a" empty, c/hi present with all children
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "c/hi") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "bonjour") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hola") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "ohayo") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "d/hi") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Directory-move variant of the double-corruption case: rename a/hi to
+# c/hi, corrupt BOTH metadata pairs. Per the asserts, the move rolls
+# back: a/hi reappears with all its children and "c" is empty.
+[[case]] # move dir corrupt source and dest
+in = "lfs.c"
+if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_mkdir(&lfs, "a/hi") => 0;
+ lfs_mkdir(&lfs, "a/hi/hola") => 0;
+ lfs_mkdir(&lfs, "a/hi/bonjour") => 0;
+ lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hi", "c/hi") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // stomp the last 3 programmed bytes of the block and rewrite it
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // corrupt the destination
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // after remount: "hi" is back in "a", "c" is empty
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ // children are intact at the rolled-back path
+ lfs_dir_open(&lfs, &dir, "a/hi") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "bonjour") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hola") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "ohayo") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "c/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "d/hi") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Same double-corruption setup as the previous directory case (move
+# rolled back), then the rename a/hi -> c/hi is retried. The retry must
+# succeed: "a" ends empty and c/hi holds all three children.
+[[case]] # move dir after corrupt
+in = "lfs.c"
+if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_mkdir(&lfs, "a/hi") => 0;
+ lfs_mkdir(&lfs, "a/hi/hola") => 0;
+ lfs_mkdir(&lfs, "a/hi/bonjour") => 0;
+ lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hi", "c/hi") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the source
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ // stomp the last 3 programmed bytes of the block and rewrite it
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // corrupt the destination
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ // continue move
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hi", "c/hi") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // after the retry: "a" empty, c/hi present with all children
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "c") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "c/hi") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "bonjour") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hola") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "ohayo") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "d/hi") => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Reentrant (power-loss) move chain for a directory: walks "hi" through
+# a -> b -> c -> d one hop per iteration, asserting at most one copy of
+# "hi" exists at any point. Creation builds the tree under "temp" and
+# renames it into place so the populated dir appears atomically.
+[[case]] # simple reentrant move dir
+reentrant = true
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+ // dirs may already exist from a previous incarnation of this test body
+ err = lfs_mkdir(&lfs, "a");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "b");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "c");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "d");
+ assert(!err || err == LFS_ERR_EXIST);
+ lfs_unmount(&lfs) => 0;
+
+ while (true) {
+ lfs_mount(&lfs, &cfg) => 0;
+ // there should never exist _2_ hi directories
+ int count = 0;
+ if (lfs_stat(&lfs, "a/hi", &info) == 0) {
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "b/hi", &info) == 0) {
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "c/hi", &info) == 0) {
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ count += 1;
+ }
+ if (lfs_stat(&lfs, "d/hi", &info) == 0) {
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ count += 1;
+ }
+ assert(count <= 1);
+ lfs_unmount(&lfs) => 0;
+
+ // advance "hi" one hop along a -> b -> c -> d, or (re)create it
+ lfs_mount(&lfs, &cfg) => 0;
+ if (lfs_stat(&lfs, "a/hi", &info) == 0) {
+ lfs_rename(&lfs, "a/hi", "b/hi") => 0;
+ } else if (lfs_stat(&lfs, "b/hi", &info) == 0) {
+ lfs_rename(&lfs, "b/hi", "c/hi") => 0;
+ } else if (lfs_stat(&lfs, "c/hi", &info) == 0) {
+ lfs_rename(&lfs, "c/hi", "d/hi") => 0;
+ } else if (lfs_stat(&lfs, "d/hi", &info) == 0) {
+ lfs_unmount(&lfs) => 0;
+ break; // success
+ } else {
+ // create dir and rename for atomicity
+ err = lfs_mkdir(&lfs, "temp");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "temp/hola");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "temp/bonjour");
+ assert(!err || err == LFS_ERR_EXIST);
+ err = lfs_mkdir(&lfs, "temp/ohayo");
+ assert(!err || err == LFS_ERR_EXIST);
+ lfs_rename(&lfs, "temp", "a/hi") => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+ }
+
+ // final state: "a" empty, d/hi present with all three children
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "a") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_dir_open(&lfs, &dir, "d") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hi") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "c/hi") => LFS_ERR_NOENT;
+ lfs_dir_open(&lfs, &dir, "d/hi") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "bonjour") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "hola") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "ohayo") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Chain of renames (a -> b -> c -> d), each in its own mount cycle, then
+# the intermediate directories "b" and "c" are removed. The file must
+# remain readable in "d" with intact contents throughout.
+[[case]] # move state stealing
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "a") => 0;
+ lfs_mkdir(&lfs, "b") => 0;
+ lfs_mkdir(&lfs, "c") => 0;
+ lfs_mkdir(&lfs, "d") => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
+ lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
+ lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // each hop of the chain happens in a separate mount cycle
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "a/hello", "b/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "b/hello", "c/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_rename(&lfs, "c/hello", "d/hello") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // file only exists in "d", contents intact
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // removing the intermediate dirs must not disturb the moved file
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_remove(&lfs, "b") => 0;
+ lfs_remove(&lfs, "c") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "a", &info) => 0;
+ lfs_stat(&lfs, "b", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "c", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "d", &info) => 0;
+ lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 5) => 5;
+ memcmp(buffer, "hola\n", 5) => 0;
+ lfs_file_read(&lfs, &file, buffer, 8) => 8;
+ memcmp(buffer, "bonjour\n", 8) => 0;
+ lfs_file_read(&lfs, &file, buffer, 6) => 6;
+ memcmp(buffer, "ohayo\n", 6) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Other specific corner cases
+[[case]] # create + delete in same commit with neighbors
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // littlefs keeps files sorted, so we know the order these will be in
+ lfs_file_open(&lfs, &file, "/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.1", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/2.in_between",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.2", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/4.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.3", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_t files[3];
+ lfs_file_open(&lfs, &files[0], "0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "2.in_between",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "4.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.4", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.5", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.6", 7) => 7;
+
+ // rename file while everything is open, this triggers both
+ // a create and delete simultaneously
+ lfs_rename(&lfs, "/1.move_me", "/3.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+
+ // check that nothing was corrupted
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.in_between") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "3.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "4.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.4") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/2.in_between", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.5") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/4.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.6") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+
+ // now move back
+ lfs_file_open(&lfs, &files[0], "0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "2.in_between",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "4.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.7", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.8", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.9", 7) => 7;
+
+ // rename file while everything is open, this triggers both
+ // a create and delete simultaneously
+ lfs_rename(&lfs, "/3.move_me", "/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+
+ // and check that nothing was corrupted again
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.in_between") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "4.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.7") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/2.in_between", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.8") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/4.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.9") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+# Same corner case, but the rename destination also exists and must be deleted
+[[case]] # create + delete + delete in same commit with neighbors
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // littlefs keeps files sorted, so we know the order these will be in
+ lfs_file_open(&lfs, &file, "/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/3.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "remove me",
+ sizeof("remove me")) => sizeof("remove me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.1", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/2.in_between",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.2", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/4.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.3", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_t files[3];
+ lfs_file_open(&lfs, &files[0], "0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "2.in_between",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "4.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.4", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.5", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.6", 7) => 7;
+
+ // rename file while everything is open; since the destination
+ // already exists this triggers a create and two deletes at once
+ lfs_rename(&lfs, "/1.move_me", "/3.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+
+ // check that nothing was corrupted
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.in_between") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "3.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "4.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.4") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/2.in_between", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.5") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/4.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.6") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+
+ // now move back
+ lfs_file_open(&lfs, &file, "/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "remove me",
+ sizeof("remove me")) => sizeof("remove me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &files[0], "0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "2.in_between",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "4.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.7", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.8", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.9", 7) => 7;
+
+ // rename back while everything is open; the destination was
+ // recreated above, so this again triggers a create and two deletes
+ lfs_rename(&lfs, "/3.move_me", "/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+
+ // and check that nothing was corrupted again
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.in_between") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "4.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.7") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/2.in_between", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.8") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/4.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.9") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # create + delete in different dirs with neighbors
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ // littlefs keeps files sorted, so we know the order these will be in
+ lfs_mkdir(&lfs, "/dir.1") => 0;
+ lfs_mkdir(&lfs, "/dir.2") => 0;
+ lfs_file_open(&lfs, &file, "/dir.1/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "remove me",
+ sizeof("remove me")) => sizeof("remove me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/dir.1/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.1", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.1/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.2", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/dir.2/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.3", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.4", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_t files[4];
+ lfs_file_open(&lfs, &files[0], "/dir.1/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "/dir.1/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "/dir.2/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[3], "/dir.2/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.5", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.6", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.7", 7) => 7;
+ lfs_file_write(&lfs, &files[3], "test.8", 7) => 7;
+
+ // rename file while everything is open, this triggers both
+ // a create and delete as it overwrites the destination file
+ lfs_rename(&lfs, "/dir.1/1.move_me", "/dir.2/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+ lfs_file_close(&lfs, &files[3]) => 0;
+
+ // check that nothing was corrupted
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "dir.1") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "dir.2") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/dir.1") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/dir.2") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/dir.1/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.5") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.1/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.6") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.7") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.8") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+
+ // now move back
+ lfs_file_open(&lfs, &file, "/dir.1/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "remove me",
+ sizeof("remove me")) => sizeof("remove me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &files[0], "/dir.1/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "/dir.1/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "/dir.2/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[3], "/dir.2/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.9", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.a", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.b", 7) => 7;
+ lfs_file_write(&lfs, &files[3], "test.c", 7) => 7;
+
+ // rename back while everything is open; the destination was
+ // recreated above, so this again overwrites the destination file
+ lfs_rename(&lfs, "/dir.2/1.move_me", "/dir.1/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+ lfs_file_close(&lfs, &files[3]) => 0;
+
+ // and check that nothing was corrupted again
+ lfs_dir_open(&lfs, &dir, "/") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "dir.1") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "dir.2") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/dir.1") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 0);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/dir.2") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/dir.1/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.9") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.1/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.a") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.b") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/dir.2/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.c") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # move fix in relocation
+in = "lfs.c"
+define.RELOCATIONS = 'range(0x3+1)'
+define.LFS_ERASE_CYCLES = 0xffffffff
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ lfs_mkdir(&lfs, "/parent") => 0;
+ lfs_mkdir(&lfs, "/parent/child") => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "move me",
+ sizeof("move me")) => sizeof("move me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.1", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.2", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.3", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.4", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_t files[4];
+ lfs_file_open(&lfs, &files[0], "/parent/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "/parent/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "/parent/child/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[3], "/parent/child/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.5", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.6", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.7", 7) => 7;
+ lfs_file_write(&lfs, &files[3], "test.8", 7) => 7;
+
+ // force specific directories to relocate
+ if (RELOCATIONS & 0x1) {
+ lfs_dir_open(&lfs, &dir, "/parent");
+ lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+ if (RELOCATIONS & 0x2) {
+ lfs_dir_open(&lfs, &dir, "/parent/child");
+ lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+
+ // ok, now we move the file, this creates a move that needs to be
+ // fixed, possibly in a metadata-pair that needs to be relocated
+ //
+ // the worst case is if we need to relocate and we need to implicitly
+ // fix the move in our parent before it falls out of date
+ lfs_rename(&lfs, "/parent/1.move_me", "/parent/child/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+ lfs_file_close(&lfs, &files[3]) => 0;
+
+ // check that nothing was corrupted
+ lfs_dir_open(&lfs, &dir, "/parent") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "child") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/parent/child") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == sizeof("move me"));
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.5") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.6") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.7") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.8") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # move fix in relocation with predecessor
+in = "lfs.c"
+define.RELOCATIONS = 'range(0x7+1)'
+define.LFS_ERASE_CYCLES = 0xffffffff
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+
+ lfs_mkdir(&lfs, "/parent") => 0;
+ lfs_mkdir(&lfs, "/parent/child") => 0;
+ lfs_mkdir(&lfs, "/parent/sibling") => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/sibling/1.move_me",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "move me",
+ sizeof("move me")) => sizeof("move me");
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/sibling/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.1", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/sibling/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.2", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/0.before",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.3", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/2.after",
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_write(&lfs, &file, "test.4", 7) => 7;
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_t files[4];
+ lfs_file_open(&lfs, &files[0], "/parent/sibling/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[1], "/parent/sibling/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[2], "/parent/child/0.before",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_open(&lfs, &files[3], "/parent/child/2.after",
+ LFS_O_WRONLY | LFS_O_TRUNC) => 0;
+ lfs_file_write(&lfs, &files[0], "test.5", 7) => 7;
+ lfs_file_write(&lfs, &files[1], "test.6", 7) => 7;
+ lfs_file_write(&lfs, &files[2], "test.7", 7) => 7;
+ lfs_file_write(&lfs, &files[3], "test.8", 7) => 7;
+
+ // force specific directories to relocate
+ if (RELOCATIONS & 0x1) {
+ lfs_dir_open(&lfs, &dir, "/parent");
+ lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+ if (RELOCATIONS & 0x2) {
+ lfs_dir_open(&lfs, &dir, "/parent/sibling");
+ lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+ if (RELOCATIONS & 0x4) {
+ lfs_dir_open(&lfs, &dir, "/parent/child");
+ lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
+ lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ }
+
+ // ok, now we move the file, this creates a move that needs to be
+ // fixed, possibly in a metadata-pair that needs to be relocated
+ //
+ // and now relocations can force us to need to fix our move in either
+ // the parent or child before things break
+ lfs_rename(&lfs,
+ "/parent/sibling/1.move_me",
+ "/parent/child/1.move_me") => 0;
+
+ lfs_file_close(&lfs, &files[0]) => 0;
+ lfs_file_close(&lfs, &files[1]) => 0;
+ lfs_file_close(&lfs, &files[2]) => 0;
+ lfs_file_close(&lfs, &files[3]) => 0;
+
+ // check that nothing was corrupted
+ lfs_dir_open(&lfs, &dir, "/parent") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "child") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "sibling") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/parent/sibling") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_dir_open(&lfs, &dir, "/parent/child") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, ".") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "..") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "0.before") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "1.move_me") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == sizeof("move me"));
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ assert(strcmp(info.name, "2.after") == 0);
+ assert(info.type == LFS_TYPE_REG);
+ assert(info.size == 7);
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ lfs_file_open(&lfs, &file, "/parent/sibling/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.5") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/sibling/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.6") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/0.before", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.7") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_file_open(&lfs, &file, "/parent/child/2.after", LFS_O_RDONLY) => 0;
+ lfs_file_read(&lfs, &file, buffer, 7) => 7;
+ assert(strcmp((char*)buffer, "test.8") == 0);
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_orphans.toml b/components/fs/littlefs/littlefs/tests/test_orphans.toml
new file mode 100644
index 00000000..241e273e
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_orphans.toml
@@ -0,0 +1,120 @@
+[[case]] # orphan test
+in = "lfs.c"
+if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "parent") => 0;
+ lfs_mkdir(&lfs, "parent/orphan") => 0;
+ lfs_mkdir(&lfs, "parent/child") => 0;
+ lfs_remove(&lfs, "parent/orphan") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ // corrupt the child's most recent commit, this should be the update
+ // to the linked-list entry, which should orphan the orphan. Note this
+ // makes a lot of assumptions about the remove operation.
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "parent/child") => 0;
+ lfs_block_t block = dir.m.pair[0];
+ lfs_dir_close(&lfs, &dir) => 0;
+ lfs_unmount(&lfs) => 0;
+ uint8_t bbuffer[LFS_BLOCK_SIZE];
+ cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ int off = LFS_BLOCK_SIZE-1;
+ while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
+ off -= 1;
+ }
+ memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
+ cfg.erase(&cfg, block) => 0;
+ cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
+ cfg.sync(&cfg) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "parent/child", &info) => 0;
+ lfs_fs_size(&lfs) => 8;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "parent/child", &info) => 0;
+ lfs_fs_size(&lfs) => 8;
+ // this mkdir should both create a dir and deorphan, so size
+ // should be unchanged
+ lfs_mkdir(&lfs, "parent/otherchild") => 0;
+ lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "parent/child", &info) => 0;
+ lfs_stat(&lfs, "parent/otherchild", &info) => 0;
+ lfs_fs_size(&lfs) => 8;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "parent/child", &info) => 0;
+ lfs_stat(&lfs, "parent/otherchild", &info) => 0;
+ lfs_fs_size(&lfs) => 8;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
+reentrant = true
+# TODO fix this case, caused by non-DAG trees
+if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+define = [
+ {FILES=6, DEPTH=1, CYCLES=20},
+ {FILES=26, DEPTH=1, CYCLES=20},
+ {FILES=3, DEPTH=3, CYCLES=20},
+]
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+
+ srand(1);
+ const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
+ for (int i = 0; i < CYCLES; i++) {
+ // create random path
+ char full_path[256];
+ for (int d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ }
+
+ // if it does not exist, we create it, else we destroy
+ int res = lfs_stat(&lfs, full_path, &info);
+ if (res == LFS_ERR_NOENT) {
+ // create each directory in turn, ignore if dir already exists
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_mkdir(&lfs, path);
+ assert(!err || err == LFS_ERR_EXIST);
+ }
+
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ lfs_stat(&lfs, path, &info) => 0;
+ assert(strcmp(info.name, &path[2*d+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+ } else {
+ // is valid dir?
+ assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ // try to delete path in reverse order, ignore if dir is not empty
+ for (int d = DEPTH-1; d >= 0; d--) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_remove(&lfs, path);
+ assert(!err || err == LFS_ERR_NOTEMPTY);
+ }
+
+ lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
diff --git a/components/fs/littlefs/littlefs/tests/test_paths.toml b/components/fs/littlefs/littlefs/tests/test_paths.toml
new file mode 100644
index 00000000..a7474c0b
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_paths.toml
@@ -0,0 +1,293 @@
+
+[[case]] # simple path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+
+ lfs_stat(&lfs, "tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "/tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+
+ lfs_mkdir(&lfs, "/milk") => 0;
+ lfs_stat(&lfs, "/milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_stat(&lfs, "milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # redundant slashes
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+
+ lfs_stat(&lfs, "/tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "//tea//hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "///tea///hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+
+ lfs_mkdir(&lfs, "////milk") => 0;
+ lfs_stat(&lfs, "////milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_stat(&lfs, "milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # dot path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+
+ lfs_stat(&lfs, "./tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "/././tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "/./tea/./hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+
+ lfs_mkdir(&lfs, "/./milk") => 0;
+ lfs_stat(&lfs, "/./milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_stat(&lfs, "milk", &info) => 0;
+ assert(strcmp(info.name, "milk") == 0);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # dot dot path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ lfs_mkdir(&lfs, "coffee") => 0;
+ lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+
+ lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "coffee/coldcoffee/../../tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "coffee/../coffee/../tea/hottea", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+
+ lfs_mkdir(&lfs, "coffee/../milk") => 0;
+ lfs_stat(&lfs, "coffee/../milk", &info) => 0;
+ strcmp(info.name, "milk") => 0;
+ lfs_stat(&lfs, "milk", &info) => 0;
+ strcmp(info.name, "milk") => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # trailing dot path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+
+ lfs_stat(&lfs, "tea/hottea/", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "tea/hottea/./.", &info) => 0;
+ assert(strcmp(info.name, "hottea") == 0);
+ lfs_stat(&lfs, "tea/hottea/..", &info) => 0;
+ assert(strcmp(info.name, "tea") == 0);
+ lfs_stat(&lfs, "tea/hottea/../.", &info) => 0;
+ assert(strcmp(info.name, "tea") == 0);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # leading dot path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, ".milk") => 0;
+ lfs_stat(&lfs, ".milk", &info) => 0;
+ strcmp(info.name, ".milk") => 0;
+ lfs_stat(&lfs, "tea/.././.milk", &info) => 0;
+ strcmp(info.name, ".milk") => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # root dot dot path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "tea") => 0;
+ lfs_mkdir(&lfs, "tea/hottea") => 0;
+ lfs_mkdir(&lfs, "tea/warmtea") => 0;
+ lfs_mkdir(&lfs, "tea/coldtea") => 0;
+ lfs_mkdir(&lfs, "coffee") => 0;
+ lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+
+ lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
+ strcmp(info.name, "hottea") => 0;
+
+ lfs_mkdir(&lfs, "coffee/../../../../../../milk") => 0;
+ lfs_stat(&lfs, "coffee/../../../../../../milk", &info) => 0;
+ strcmp(info.name, "milk") => 0;
+ lfs_stat(&lfs, "milk", &info) => 0;
+ strcmp(info.name, "milk") => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # invalid path tests
+code = '''
+ lfs_format(&lfs, &cfg);
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
+ lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
+
+ lfs_remove(&lfs, "dirt") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "dirt/ground") => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
+
+ lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "dirt/ground", LFS_O_WRONLY | LFS_O_CREAT)
+ => LFS_ERR_NOENT;
+ lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
+ lfs_file_open(&lfs, &file, "dirt/ground/earth", LFS_O_WRONLY | LFS_O_CREAT)
+ => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # root operations
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "/", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
+ lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
+ => LFS_ERR_ISDIR;
+
+ lfs_remove(&lfs, "/") => LFS_ERR_INVAL;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # root representations
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "/", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_stat(&lfs, "", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_stat(&lfs, ".", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_stat(&lfs, "..", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_stat(&lfs, "//", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_stat(&lfs, "./", &info) => 0;
+ assert(strcmp(info.name, "/") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # superblock conflict test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
+ lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;
+
+ lfs_mkdir(&lfs, "littlefs") => 0;
+ lfs_stat(&lfs, "littlefs", &info) => 0;
+ assert(strcmp(info.name, "littlefs") == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ lfs_remove(&lfs, "littlefs") => 0;
+ lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # max path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "coffee") => 0;
+ lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+
+ memset(path, 'w', LFS_NAME_MAX+1);
+ path[LFS_NAME_MAX+1] = '\0';
+ lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
+ lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
+ => LFS_ERR_NAMETOOLONG;
+
+ memcpy(path, "coffee/", strlen("coffee/"));
+ memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
+ path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
+ lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
+ lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
+ => LFS_ERR_NAMETOOLONG;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # really big path test
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_mkdir(&lfs, "coffee") => 0;
+ lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
+ lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
+
+ memset(path, 'w', LFS_NAME_MAX);
+ path[LFS_NAME_MAX] = '\0';
+ lfs_mkdir(&lfs, path) => 0;
+ lfs_remove(&lfs, path) => 0;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_remove(&lfs, path) => 0;
+
+ memcpy(path, "coffee/", strlen("coffee/"));
+ memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX);
+ path[strlen("coffee/")+LFS_NAME_MAX] = '\0';
+ lfs_mkdir(&lfs, path) => 0;
+ lfs_remove(&lfs, path) => 0;
+ lfs_file_open(&lfs, &file, path,
+ LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_remove(&lfs, path) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
diff --git a/components/fs/littlefs/littlefs/tests/test_relocations.toml b/components/fs/littlefs/littlefs/tests/test_relocations.toml
new file mode 100644
index 00000000..71b10475
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_relocations.toml
@@ -0,0 +1,305 @@
+# specific corner cases worth explicitly testing for
+[[case]] # dangling split dir test
+define.ITERATIONS = 20
+define.COUNT = 10
+define.LFS_BLOCK_CYCLES = [8, 1]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ // fill up filesystem so only ~16 blocks are left
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ memset(buffer, 0, 512);
+ while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
+ lfs_file_write(&lfs, &file, buffer, 512) => 512;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ // make a child dir to use in bounded space
+ lfs_mkdir(&lfs, "child") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int j = 0; j < ITERATIONS; j++) {
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "child") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "test%03d_loooooooooooooooooong_name", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ strcmp(info.name, path) => 0;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ if (j == ITERATIONS-1) {
+ break;
+ }
+
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_dir_open(&lfs, &dir, "child") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "test%03d_loooooooooooooooooong_name", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ strcmp(info.name, path) => 0;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # outdated head test
+define.ITERATIONS = 20
+define.COUNT = 10
+define.LFS_BLOCK_CYCLES = [8, 1]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ // fill up filesystem so only ~16 blocks are left
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ memset(buffer, 0, 512);
+ while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
+ lfs_file_write(&lfs, &file, buffer, 512) => 512;
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ // make a child dir to use in bounded space
+ lfs_mkdir(&lfs, "child") => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ for (int j = 0; j < ITERATIONS; j++) {
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_dir_open(&lfs, &dir, "child") => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "test%03d_loooooooooooooooooong_name", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ strcmp(info.name, path) => 0;
+ info.size => 0;
+
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hi", 2) => 2;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "test%03d_loooooooooooooooooong_name", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ strcmp(info.name, path) => 0;
+ info.size => 2;
+
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
+ lfs_file_write(&lfs, &file, "hi", 2) => 2;
+ lfs_file_close(&lfs, &file) => 0;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+
+ lfs_dir_rewind(&lfs, &dir) => 0;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "test%03d_loooooooooooooooooong_name", i);
+ lfs_dir_read(&lfs, &dir, &info) => 1;
+ strcmp(info.name, path) => 0;
+ info.size => 2;
+ }
+ lfs_dir_read(&lfs, &dir, &info) => 0;
+ lfs_dir_close(&lfs, &dir) => 0;
+
+ for (int i = 0; i < COUNT; i++) {
+ sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
+ lfs_remove(&lfs, path) => 0;
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant testing for relocations, this is the same as the
+ # orphan testing, except here we also set block_cycles so that
+ # almost every tree operation needs a relocation
+reentrant = true
+# TODO fix this case, caused by non-DAG trees
+if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+define = [
+ {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
+ {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
+ {FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
+]
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+
+ srand(1);
+ const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
+ for (int i = 0; i < CYCLES; i++) {
+ // create random path
+ char full_path[256];
+ for (int d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ }
+
+ // if it does not exist, we create it, else we destroy
+ int res = lfs_stat(&lfs, full_path, &info);
+ if (res == LFS_ERR_NOENT) {
+ // create each directory in turn, ignore if dir already exists
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_mkdir(&lfs, path);
+ assert(!err || err == LFS_ERR_EXIST);
+ }
+
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ lfs_stat(&lfs, path, &info) => 0;
+ assert(strcmp(info.name, &path[2*d+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+ } else {
+ // is valid dir?
+ assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ // try to delete path in reverse order, ignore if dir is not empty
+ for (int d = DEPTH-1; d >= 0; d--) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_remove(&lfs, path);
+ assert(!err || err == LFS_ERR_NOTEMPTY);
+ }
+
+ lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant testing for relocations, but now with random renames!
+reentrant = true
+# TODO fix this case, caused by non-DAG trees
+if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+define = [
+ {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
+ {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
+ {FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
+]
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+
+ srand(1);
+ const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
+ for (int i = 0; i < CYCLES; i++) {
+ // create random path
+ char full_path[256];
+ for (int d = 0; d < DEPTH; d++) {
+ sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
+ }
+
+ // if it does not exist, we create it, else we destroy
+ int res = lfs_stat(&lfs, full_path, &info);
+ assert(!res || res == LFS_ERR_NOENT);
+ if (res == LFS_ERR_NOENT) {
+ // create each directory in turn, ignore if dir already exists
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_mkdir(&lfs, path);
+ assert(!err || err == LFS_ERR_EXIST);
+ }
+
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ lfs_stat(&lfs, path, &info) => 0;
+ assert(strcmp(info.name, &path[2*d+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+ } else {
+ assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+
+ // create new random path
+ char new_path[256];
+ for (int d = 0; d < DEPTH; d++) {
+ sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]);
+ }
+
+ // if new path does not exist, rename, otherwise destroy
+ res = lfs_stat(&lfs, new_path, &info);
+ assert(!res || res == LFS_ERR_NOENT);
+ if (res == LFS_ERR_NOENT) {
+            // rename each prefix dir in turn, tracking renames that succeed
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(&path[2*d], &full_path[2*d]);
+ path[2*d+2] = '\0';
+ strcpy(&path[128+2*d], &new_path[2*d]);
+ path[128+2*d+2] = '\0';
+ err = lfs_rename(&lfs, path, path+128);
+ assert(!err || err == LFS_ERR_NOTEMPTY);
+ if (!err) {
+ strcpy(path, path+128);
+ }
+ }
+
+ for (int d = 0; d < DEPTH; d++) {
+ strcpy(path, new_path);
+ path[2*d+2] = '\0';
+ lfs_stat(&lfs, path, &info) => 0;
+ assert(strcmp(info.name, &path[2*d+1]) == 0);
+ assert(info.type == LFS_TYPE_DIR);
+ }
+
+ lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
+ } else {
+ // try to delete path in reverse order,
+ // ignore if dir is not empty
+ for (int d = DEPTH-1; d >= 0; d--) {
+ strcpy(path, full_path);
+ path[2*d+2] = '\0';
+ err = lfs_remove(&lfs, path);
+ assert(!err || err == LFS_ERR_NOTEMPTY);
+ }
+
+ lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
+ }
+ }
+ }
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_seek.toml b/components/fs/littlefs/littlefs/tests/test_seek.toml
new file mode 100644
index 00000000..79d7728a
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_seek.toml
@@ -0,0 +1,380 @@
+
+[[case]] # simple file seek
+define = [
+ {COUNT=132, SKIP=4},
+ {COUNT=132, SKIP=128},
+ {COUNT=200, SKIP=10},
+ {COUNT=200, SKIP=100},
+ {COUNT=4, SKIP=1},
+ {COUNT=4, SKIP=2},
+]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen("kittycatcat");
+ memcpy(buffer, "kittycatcat", size);
+ for (int j = 0; j < COUNT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
+
+ lfs_soff_t pos = -1;
+ size = strlen("kittycatcat");
+ for (int i = 0; i < SKIP; i++) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+ pos = lfs_file_tell(&lfs, &file);
+ }
+ assert(pos >= 0);
+
+ lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_rewind(&lfs, &file) => 0;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ size = lfs_file_size(&lfs, &file);
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # simple file seek and write
+define = [
+ {COUNT=132, SKIP=4},
+ {COUNT=132, SKIP=128},
+ {COUNT=200, SKIP=10},
+ {COUNT=200, SKIP=100},
+ {COUNT=4, SKIP=1},
+ {COUNT=4, SKIP=2},
+]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen("kittycatcat");
+ memcpy(buffer, "kittycatcat", size);
+ for (int j = 0; j < COUNT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
+
+ lfs_soff_t pos = -1;
+ size = strlen("kittycatcat");
+ for (int i = 0; i < SKIP; i++) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+ pos = lfs_file_tell(&lfs, &file);
+ }
+ assert(pos >= 0);
+
+ memcpy(buffer, "doggodogdog", size);
+ lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+
+ lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "doggodogdog", size) => 0;
+
+ lfs_file_rewind(&lfs, &file) => 0;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "doggodogdog", size) => 0;
+
+ lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ size = lfs_file_size(&lfs, &file);
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # boundary seek and writes
+define.COUNT = 132
+define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen("kittycatcat");
+ memcpy(buffer, "kittycatcat", size);
+ for (int j = 0; j < COUNT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
+
+ size = strlen("hedgehoghog");
+ const lfs_soff_t offsets[] = OFFSETS;
+
+ for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
+ lfs_soff_t off = offsets[i];
+ memcpy(buffer, "hedgehoghog", size);
+ lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "hedgehoghog", size) => 0;
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "hedgehoghog", size) => 0;
+
+ lfs_file_sync(&lfs, &file) => 0;
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "kittycatcat", size) => 0;
+
+ lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "hedgehoghog", size) => 0;
+ }
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # out of bounds seek
+define = [
+ {COUNT=132, SKIP=4},
+ {COUNT=132, SKIP=128},
+ {COUNT=200, SKIP=10},
+ {COUNT=200, SKIP=100},
+ {COUNT=4, SKIP=2},
+ {COUNT=4, SKIP=3},
+]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty",
+ LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
+ size = strlen("kittycatcat");
+ memcpy(buffer, "kittycatcat", size);
+ for (int j = 0; j < COUNT; j++) {
+ lfs_file_write(&lfs, &file, buffer, size);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
+
+ size = strlen("kittycatcat");
+ lfs_file_size(&lfs, &file) => COUNT*size;
+ lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
+ LFS_SEEK_SET) => (COUNT+SKIP)*size;
+ lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+ memcpy(buffer, "porcupineee", size);
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+
+ lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
+ LFS_SEEK_SET) => (COUNT+SKIP)*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "porcupineee", size) => 0;
+
+ lfs_file_seek(&lfs, &file, COUNT*size,
+ LFS_SEEK_SET) => COUNT*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;
+
+ lfs_file_seek(&lfs, &file, -((COUNT+SKIP)*size),
+ LFS_SEEK_CUR) => LFS_ERR_INVAL;
+ lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
+
+ lfs_file_seek(&lfs, &file, -((COUNT+2*SKIP)*size),
+ LFS_SEEK_END) => LFS_ERR_INVAL;
+ lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # inline write and seek
+define.SIZE = [2, 4, 128, 132]
+code = '''
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ lfs_file_open(&lfs, &file, "tinykitty",
+ LFS_O_RDWR | LFS_O_CREAT) => 0;
+ int j = 0;
+ int k = 0;
+
+ memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
+ for (unsigned i = 0; i < SIZE; i++) {
+ lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
+ lfs_file_tell(&lfs, &file) => i+1;
+ lfs_file_size(&lfs, &file) => i+1;
+ }
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+ lfs_file_tell(&lfs, &file) => 0;
+ lfs_file_size(&lfs, &file) => SIZE;
+ for (unsigned i = 0; i < SIZE; i++) {
+ uint8_t c;
+ lfs_file_read(&lfs, &file, &c, 1) => 1;
+ c => buffer[k++ % 26];
+ }
+
+ lfs_file_sync(&lfs, &file) => 0;
+ lfs_file_tell(&lfs, &file) => SIZE;
+ lfs_file_size(&lfs, &file) => SIZE;
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+ for (unsigned i = 0; i < SIZE; i++) {
+ lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
+ lfs_file_tell(&lfs, &file) => i+1;
+ lfs_file_size(&lfs, &file) => SIZE;
+ lfs_file_sync(&lfs, &file) => 0;
+ lfs_file_tell(&lfs, &file) => i+1;
+ lfs_file_size(&lfs, &file) => SIZE;
+ if (i < SIZE-2) {
+ uint8_t c[3];
+ lfs_file_seek(&lfs, &file, -1, LFS_SEEK_CUR) => i;
+ lfs_file_read(&lfs, &file, &c, 3) => 3;
+ lfs_file_tell(&lfs, &file) => i+3;
+ lfs_file_size(&lfs, &file) => SIZE;
+ lfs_file_seek(&lfs, &file, i+1, LFS_SEEK_SET) => i+1;
+ lfs_file_tell(&lfs, &file) => i+1;
+ lfs_file_size(&lfs, &file) => SIZE;
+ }
+ }
+
+ lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+ lfs_file_tell(&lfs, &file) => 0;
+ lfs_file_size(&lfs, &file) => SIZE;
+ for (unsigned i = 0; i < SIZE; i++) {
+ uint8_t c;
+ lfs_file_read(&lfs, &file, &c, 1) => 1;
+ c => buffer[k++ % 26];
+ }
+
+ lfs_file_sync(&lfs, &file) => 0;
+ lfs_file_tell(&lfs, &file) => SIZE;
+ lfs_file_size(&lfs, &file) => SIZE;
+
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # file seek and write with power-loss
+# must be power-of-2 for quadratic probing to be exhaustive
+define.COUNT = [4, 64, 128]
+reentrant = true
+code = '''
+ err = lfs_mount(&lfs, &cfg);
+ if (err) {
+ lfs_format(&lfs, &cfg) => 0;
+ lfs_mount(&lfs, &cfg) => 0;
+ }
+ err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
+ assert(!err || err == LFS_ERR_NOENT);
+ if (!err) {
+ if (lfs_file_size(&lfs, &file) != 0) {
+ lfs_file_size(&lfs, &file) => 11*COUNT;
+ for (int j = 0; j < COUNT; j++) {
+ memset(buffer, 0, 11+1);
+ lfs_file_read(&lfs, &file, buffer, 11) => 11;
+ assert(memcmp(buffer, "kittycatcat", 11) == 0 ||
+ memcmp(buffer, "doggodogdog", 11) == 0);
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ }
+
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_WRONLY | LFS_O_CREAT) => 0;
+ if (lfs_file_size(&lfs, &file) == 0) {
+ for (int j = 0; j < COUNT; j++) {
+ strcpy((char*)buffer, "kittycatcat");
+ size = strlen((char*)buffer);
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ strcpy((char*)buffer, "doggodogdog");
+ size = strlen((char*)buffer);
+
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
+ lfs_file_size(&lfs, &file) => COUNT*size;
+ // seek and write using quadratic probing to touch all
+ // 11-byte words in the file
+ lfs_off_t off = 0;
+ for (int j = 0; j < COUNT; j++) {
+ off = (5*off + 1) % COUNT;
+ lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, "kittycatcat", size) == 0 ||
+ memcmp(buffer, "doggodogdog", size) == 0);
+ if (memcmp(buffer, "doggodogdog", size) != 0) {
+ lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
+ strcpy((char*)buffer, "doggodogdog");
+ lfs_file_write(&lfs, &file, buffer, size) => size;
+ lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, "doggodogdog", size) == 0);
+ lfs_file_sync(&lfs, &file) => 0;
+ lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, "doggodogdog", size) == 0);
+ }
+ }
+ lfs_file_close(&lfs, &file) => 0;
+
+ lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
+ lfs_file_size(&lfs, &file) => COUNT*size;
+ for (int j = 0; j < COUNT; j++) {
+ lfs_file_read(&lfs, &file, buffer, size) => size;
+ assert(memcmp(buffer, "doggodogdog", size) == 0);
+ }
+ lfs_file_close(&lfs, &file) => 0;
+ lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_superblocks.toml b/components/fs/littlefs/littlefs/tests/test_superblocks.toml
new file mode 100644
index 00000000..407c8454
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_superblocks.toml
@@ -0,0 +1,135 @@
+# Tests for superblock formatting, mounting, and superblock expansion,
+# which relocates the root pair once it exceeds LFS_BLOCK_CYCLES erases.
+
+[[case]] # simple formatting test
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+'''
+
+[[case]] # mount/unmount
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant format
+reentrant = true
+code = '''
+    // a previous power-loss may have interrupted the format
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # invalid mount
+code = '''
+    // storage was never formatted, so mount must fail cleanly
+    lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
+'''
+
+[[case]] # expanding superblock
+define.LFS_BLOCK_CYCLES = [32, 33, 1]
+define.N = [10, 100, 1000]
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    // churn the root dir so block_cycles eventually forces expansion
+    for (int i = 0; i < N; i++) {
+        lfs_file_open(&lfs, &file, "dummy",
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        lfs_file_close(&lfs, &file) => 0;
+        lfs_stat(&lfs, "dummy", &info) => 0;
+        assert(strcmp(info.name, "dummy") == 0);
+        assert(info.type == LFS_TYPE_REG);
+        lfs_remove(&lfs, "dummy") => 0;
+    }
+    lfs_unmount(&lfs) => 0;
+
+    // one last check after power-cycle
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "dummy",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_stat(&lfs, "dummy", &info) => 0;
+    assert(strcmp(info.name, "dummy") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # expanding superblock with power cycle
+define.LFS_BLOCK_CYCLES = [32, 33, 1]
+define.N = [10, 100, 1000]
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    // mount/unmount each iteration to simulate a power cycle
+    for (int i = 0; i < N; i++) {
+        lfs_mount(&lfs, &cfg) => 0;
+        // remove lingering dummy?
+        err = lfs_stat(&lfs, "dummy", &info);
+        assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
+        if (!err) {
+            assert(strcmp(info.name, "dummy") == 0);
+            assert(info.type == LFS_TYPE_REG);
+            lfs_remove(&lfs, "dummy") => 0;
+        }
+
+        lfs_file_open(&lfs, &file, "dummy",
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        lfs_file_close(&lfs, &file) => 0;
+        lfs_stat(&lfs, "dummy", &info) => 0;
+        assert(strcmp(info.name, "dummy") == 0);
+        assert(info.type == LFS_TYPE_REG);
+        lfs_unmount(&lfs) => 0;
+    }
+
+    // one last check after power-cycle
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_stat(&lfs, "dummy", &info) => 0;
+    assert(strcmp(info.name, "dummy") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # reentrant expanding superblock
+define.LFS_BLOCK_CYCLES = [2, 1]
+define.N = 24
+reentrant = true
+code = '''
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+
+    // any iteration here may be interrupted by a power-loss
+    for (int i = 0; i < N; i++) {
+        // remove lingering dummy?
+        err = lfs_stat(&lfs, "dummy", &info);
+        assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
+        if (!err) {
+            assert(strcmp(info.name, "dummy") == 0);
+            assert(info.type == LFS_TYPE_REG);
+            lfs_remove(&lfs, "dummy") => 0;
+        }
+
+        lfs_file_open(&lfs, &file, "dummy",
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
+        lfs_file_close(&lfs, &file) => 0;
+        lfs_stat(&lfs, "dummy", &info) => 0;
+        assert(strcmp(info.name, "dummy") == 0);
+        assert(info.type == LFS_TYPE_REG);
+    }
+
+    lfs_unmount(&lfs) => 0;
+
+    // one last check after power-cycle
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_stat(&lfs, "dummy", &info) => 0;
+    assert(strcmp(info.name, "dummy") == 0);
+    assert(info.type == LFS_TYPE_REG);
+    lfs_unmount(&lfs) => 0;
+'''
diff --git a/components/fs/littlefs/littlefs/tests/test_truncate.toml b/components/fs/littlefs/littlefs/tests/test_truncate.toml
new file mode 100644
index 00000000..850d7aae
--- /dev/null
+++ b/components/fs/littlefs/littlefs/tests/test_truncate.toml
@@ -0,0 +1,447 @@
+# Tests for lfs_file_truncate: shrinking, expanding (zero-fill), and
+# noop truncates, across remounts and simulated power-loss.
+
+[[case]] # simple truncate
+define.MEDIUMSIZE = [32, 2048]
+define.LARGESIZE = 8192
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop",
+            LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    // shrinking truncate should discard everything past MEDIUMSIZE
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    size = strlen("hair");
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # truncate and read
+define.MEDIUMSIZE = [32, 2048]
+define.LARGESIZE = 8192
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldyread",
+            LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    // everything before the truncate point remains readable
+    size = strlen("hair");
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    size = strlen("hair");
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # write, truncate, and read
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "sequence",
+            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+    size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
+    lfs_size_t qsize = size / 4;
+    uint8_t *wb = buffer;
+    uint8_t *rb = buffer + size;
+    for (lfs_off_t j = 0; j < size; ++j) {
+        wb[j] = j;
+    }
+
+    /* Spread sequence over size */
+    lfs_file_write(&lfs, &file, wb, size) => size;
+    lfs_file_size(&lfs, &file) => size;
+    lfs_file_tell(&lfs, &file) => size;
+
+    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+    lfs_file_tell(&lfs, &file) => 0;
+
+    /* Chop off the last quarter */
+    lfs_size_t trunc = size - qsize;
+    lfs_file_truncate(&lfs, &file, trunc) => 0;
+    lfs_file_tell(&lfs, &file) => 0;
+    lfs_file_size(&lfs, &file) => trunc;
+
+    /* Read should produce first 3/4 */
+    lfs_file_read(&lfs, &file, rb, size) => trunc;
+    memcmp(rb, wb, trunc) => 0;
+
+    /* Move to 1/4 */
+    lfs_file_size(&lfs, &file) => trunc;
+    lfs_file_seek(&lfs, &file, qsize, LFS_SEEK_SET) => qsize;
+    lfs_file_tell(&lfs, &file) => qsize;
+
+    /* Chop to 1/2 */
+    trunc -= qsize;
+    lfs_file_truncate(&lfs, &file, trunc) => 0;
+    lfs_file_tell(&lfs, &file) => qsize;
+    lfs_file_size(&lfs, &file) => trunc;
+
+    /* Read should produce second quarter */
+    lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
+    memcmp(rb, wb + qsize, trunc - qsize) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # truncate and write
+define.MEDIUMSIZE = [32, 2048]
+define.LARGESIZE = 8192
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldywrite",
+            LFS_O_WRONLY | LFS_O_CREAT) => 0;
+
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    // refill the truncated file with new contents
+    strcpy((char*)buffer, "bald");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    size = strlen("bald");
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "bald", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # truncate write under powerloss
+define.SMALLSIZE = [4, 512]
+define.MEDIUMSIZE = [32, 1024]
+define.LARGESIZE = 2048
+reentrant = true
+code = '''
+    err = lfs_mount(&lfs, &cfg);
+    if (err) {
+        lfs_format(&lfs, &cfg) => 0;
+        lfs_mount(&lfs, &cfg) => 0;
+    }
+    // after a power-loss any intermediate size/content may survive
+    err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
+    assert(!err || err == LFS_ERR_NOENT);
+    if (!err) {
+        size = lfs_file_size(&lfs, &file);
+        assert(size == 0 ||
+                size == LARGESIZE ||
+                size == MEDIUMSIZE ||
+                size == SMALLSIZE);
+        for (lfs_off_t j = 0; j < size; j += 4) {
+            lfs_file_read(&lfs, &file, buffer, 4) => 4;
+            assert(memcmp(buffer, "hair", 4) == 0 ||
+                    memcmp(buffer, "bald", 4) == 0 ||
+                    memcmp(buffer, "comb", 4) == 0);
+        }
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    lfs_file_open(&lfs, &file, "baldy",
+            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+    lfs_file_size(&lfs, &file) => 0;
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+    lfs_file_close(&lfs, &file) => 0;
+
+    lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => LARGESIZE;
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+    strcpy((char*)buffer, "bald");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+    lfs_file_close(&lfs, &file) => 0;
+
+    lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+    lfs_file_truncate(&lfs, &file, SMALLSIZE) => 0;
+    lfs_file_size(&lfs, &file) => SMALLSIZE;
+    strcpy((char*)buffer, "comb");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+    }
+    lfs_file_size(&lfs, &file) => SMALLSIZE;
+    lfs_file_close(&lfs, &file) => 0;
+
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # more aggressive general truncation tests
+define.CONFIG = 'range(6)'
+define.SMALLSIZE = 32
+define.MEDIUMSIZE = 2048
+define.LARGESIZE = 8192
+code = '''
+    // each CONFIG row: start size, start seek, hot (open) truncate, cold truncate
+    #define COUNT 5
+    const struct {
+        lfs_off_t startsizes[COUNT];
+        lfs_off_t startseeks[COUNT];
+        lfs_off_t hotsizes[COUNT];
+        lfs_off_t coldsizes[COUNT];
+    } configs[] = {
+        // cold shrinking
+        {{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE}},
+        // cold expanding
+        {{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE}},
+        // warm shrinking truncate
+        {{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE},
+         {          0,           0,           0,           0,           0}},
+        // warm expanding truncate
+        {{          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE},
+         {          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
+        // mid-file shrinking truncate
+        {{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {  LARGESIZE,   LARGESIZE,   LARGESIZE,   LARGESIZE,   LARGESIZE},
+         {          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE},
+         {          0,           0,           0,           0,           0}},
+        // mid-file expanding truncate
+        {{          0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE, 2*LARGESIZE},
+         {          0,           0,   SMALLSIZE,  MEDIUMSIZE,   LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
+         {2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
+    };
+
+    const lfs_off_t *startsizes = configs[CONFIG].startsizes;
+    const lfs_off_t *startseeks = configs[CONFIG].startseeks;
+    const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
+    const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
+
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+
+    for (unsigned i = 0; i < COUNT; i++) {
+        sprintf(path, "hairyhead%d", i);
+        lfs_file_open(&lfs, &file, path,
+                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
+
+        strcpy((char*)buffer, "hair");
+        size = strlen((char*)buffer);
+        for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
+            lfs_file_write(&lfs, &file, buffer, size) => size;
+        }
+        lfs_file_size(&lfs, &file) => startsizes[i];
+
+        if (startseeks[i] != startsizes[i]) {
+            lfs_file_seek(&lfs, &file,
+                    startseeks[i], LFS_SEEK_SET) => startseeks[i];
+        }
+
+        lfs_file_truncate(&lfs, &file, hotsizes[i]) => 0;
+        lfs_file_size(&lfs, &file) => hotsizes[i];
+
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+
+    for (unsigned i = 0; i < COUNT; i++) {
+        sprintf(path, "hairyhead%d", i);
+        lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
+        lfs_file_size(&lfs, &file) => hotsizes[i];
+
+        size = strlen("hair");
+        lfs_off_t j = 0;
+        for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
+            lfs_file_read(&lfs, &file, buffer, size) => size;
+            memcmp(buffer, "hair", size) => 0;
+        }
+
+        for (; j < hotsizes[i]; j += size) {
+            lfs_file_read(&lfs, &file, buffer, size) => size;
+            memcmp(buffer, "\0\0\0\0", size) => 0;
+        }
+
+        lfs_file_truncate(&lfs, &file, coldsizes[i]) => 0;
+        lfs_file_size(&lfs, &file) => coldsizes[i];
+
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    lfs_unmount(&lfs) => 0;
+
+    lfs_mount(&lfs, &cfg) => 0;
+
+    for (unsigned i = 0; i < COUNT; i++) {
+        sprintf(path, "hairyhead%d", i);
+        lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
+        lfs_file_size(&lfs, &file) => coldsizes[i];
+
+        size = strlen("hair");
+        lfs_off_t j = 0;
+        for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
+                j += size) {
+            lfs_file_read(&lfs, &file, buffer, size) => size;
+            memcmp(buffer, "hair", size) => 0;
+        }
+
+        for (; j < coldsizes[i]; j += size) {
+            lfs_file_read(&lfs, &file, buffer, size) => size;
+            memcmp(buffer, "\0\0\0\0", size) => 0;
+        }
+
+        lfs_file_close(&lfs, &file) => 0;
+    }
+
+    lfs_unmount(&lfs) => 0;
+'''
+
+[[case]] # noop truncate
+define.MEDIUMSIZE = [32, 2048]
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop",
+            LFS_O_RDWR | LFS_O_CREAT) => 0;
+
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+
+        // this truncate should do nothing
+        lfs_file_truncate(&lfs, &file, j+size) => 0;
+    }
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+    // should do nothing again
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // still there after reboot?
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''