Merge pull request #11624 from geky/littlefs-v1.7.2

LittleFS: Update to v1.7.2
pull/11747/head
Martin Kojtal 2019-10-23 14:55:10 +02:00 committed by GitHub
commit bb48dd35c6
11 changed files with 410 additions and 224 deletions


@@ -35,7 +35,7 @@ script:
      if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
      then
          CURR=$(tail -n1 sizes | awk '{print $1}')
-         PREV=$(curl -u $GEKY_BOT_STATUSES https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
+         PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
              | jq -re "select(.sha != \"$TRAVIS_COMMIT\")
              | .statuses[] | select(.context == \"$STAGE/$NAME\").description
              | capture(\"code size is (?<size>[0-9]+)\").size" \
@@ -100,6 +100,7 @@ jobs:
      env:
        - STAGE=test
        - NAME=littlefs-fuse
+     if: branch !~ -prefix$
      install:
        - sudo apt-get install libfuse-dev
        - git clone --depth 1 https://github.com/geky/littlefs-fuse
@@ -128,56 +129,88 @@ jobs:
        - ls
        - make -B test_dirs test_files QUIET=1

-   # Automatically update releases
+   # Automatically create releases
    - stage: deploy
      env:
        - STAGE=deploy
        - NAME=deploy
      script:
-       # Find version defined in lfs.h
-       - LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
-       - LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
-       - LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
-       # Grab latests patch from repo tags, default to 0
-       - LFS_VERSION_PATCH=$(curl -f -u "$GEKY_BOT_RELEASES"
-         https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs
-         | jq 'map(.ref | match(
-             "refs/tags/v'"$LFS_VERSION_MAJOR"'\\.'"$LFS_VERSION_MINOR"'\\.(.*)$")
-             .captures[].string | tonumber + 1) | max // 0')
-       # We have our new version
-       - LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
-       - echo "VERSION $LFS_VERSION"
        - |
+         bash << 'SCRIPT'
+         set -ev
+         # Find version defined in lfs.h
+         LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
+         LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
+         LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
+         # Grab latests patch from repo tags, default to 0, needs finagling
+         # to get past github's pagination api
+         PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
+         PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
+           | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
+           || echo $PREV_URL)
+         LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
+           | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
+             .captures[].string | tonumber) | max + 1' \
+           || echo 0)
+         # We have our new version
+         LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
+         echo "VERSION $LFS_VERSION"
          # Check that we're the most recent commit
          CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
            https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
            | jq -re '.sha')
-         if [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ]
-         then
-           # Build release notes
-           PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
-           if [ ! -z "$PREV" ]
-           then
-             echo "PREV $PREV"
-             CHANGES=$'### Changes\n\n'$( \
-               git log --oneline $PREV.. --grep='^Merge' --invert-grep)
-             printf "CHANGES\n%s\n\n" "$CHANGES"
-           fi
-           # Create the release
-           curl -f -u "$GEKY_BOT_RELEASES" -X POST \
-             https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
-             -d "{
-               \"tag_name\": \"$LFS_VERSION\",
-               \"target_commitish\": \"$TRAVIS_COMMIT\",
-               \"name\": \"${LFS_VERSION%.0}\",
-               \"body\": $(jq -sR '.' <<< "$CHANGES")
-             }"
-         fi
+         [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
+         # Create major branch
+         git branch v$LFS_VERSION_MAJOR HEAD
+         # Create major prefix branch
+         git config user.name "geky bot"
+         git config user.email "bot@geky.net"
+         git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
+           --depth=50 v$LFS_VERSION_MAJOR-prefix || true
+         ./scripts/prefix.py lfs$LFS_VERSION_MAJOR
+         git branch v$LFS_VERSION_MAJOR-prefix $( \
+           git commit-tree $(git write-tree) \
+             $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
+             -p HEAD \
+             -m "Generated v$LFS_VERSION_MAJOR prefixes")
+         git reset --hard
+         # Update major version branches (vN and vN-prefix)
+         git push https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
+           v$LFS_VERSION_MAJOR \
+           v$LFS_VERSION_MAJOR-prefix
+         # Create patch version tag (vN.N.N)
+         curl -f -u "$GEKY_BOT_RELEASES" -X POST \
+           https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs \
+           -d "{
+             \"ref\": \"refs/tags/$LFS_VERSION\",
+             \"sha\": \"$TRAVIS_COMMIT\"
+           }"
+         # Create minor release?
+         [[ "$LFS_VERSION" == *.0 ]] || exit 0
+         # Build release notes
+         PREV=$(git tag --sort=-v:refname -l "v*.0" | head -1)
+         if [ ! -z "$PREV" ]
+         then
+           echo "PREV $PREV"
+           CHANGES=$'### Changes\n\n'$( \
+             git log --oneline $PREV.. --grep='^Merge' --invert-grep)
+           printf "CHANGES\n%s\n\n" "$CHANGES"
+         fi
+         # Create the release
+         curl -f -u "$GEKY_BOT_RELEASES" -X POST \
+           https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
+           -d "{
+             \"tag_name\": \"$LFS_VERSION\",
+             \"name\": \"${LFS_VERSION%.0}\",
+             \"draft\": true,
+             \"body\": $(jq -sR '.' <<< "$CHANGES")
+           }" #"
+         SCRIPT

# Manage statuses
before_install:
  - |
-   curl -u $GEKY_BOT_STATUSES -X POST \
+   curl -u "$GEKY_BOT_STATUSES" -X POST \
      https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
      -d "{
        \"context\": \"$STAGE/$NAME\",
@@ -188,7 +221,7 @@ before_install:
after_failure:
  - |
-   curl -u $GEKY_BOT_STATUSES -X POST \
+   curl -u "$GEKY_BOT_STATUSES" -X POST \
      https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
      -d "{
        \"context\": \"$STAGE/$NAME\",
@@ -199,7 +232,7 @@ after_failure:
after_success:
  - |
-   curl -u $GEKY_BOT_STATUSES -X POST \
+   curl -u "$GEKY_BOT_STATUSES" -X POST \
      https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
      -d "{
        \"context\": \"$STAGE/$NAME\",


@@ -25,7 +25,8 @@ ifdef WORD
override CFLAGS += -m$(WORD)
endif
override CFLAGS += -I.
-override CFLAGS += -std=c99 -Wall -pedantic -Wshadow -Wunused-parameter
+override CFLAGS += -std=c99 -Wall -pedantic
+override CFLAGS += -Wshadow -Wunused-parameter -Wjump-misses-init -Wsign-compare

all: $(TARGET)


@@ -111,9 +111,9 @@ filesystem until sync or close is called on the file.

## Other notes

-All littlefs have the potential to return a negative error code. The errors
-can be either one of those found in the `enum lfs_error` in [lfs.h](lfs.h),
-or an error returned by the user's block device operations.
+All littlefs calls have the potential to return a negative error code. The
+errors can be either one of those found in the `enum lfs_error` in
+[lfs.h](lfs.h), or an error returned by the user's block device operations.

In the configuration struct, the `prog` and `erase` function provided by the
user may return a `LFS_ERR_CORRUPT` error if the implementation already can
@@ -175,3 +175,18 @@ handy.
[littlefs-js](https://github.com/geky/littlefs-js) - A JavaScript wrapper for
littlefs. I'm not sure why you would want this, but it is handy for demos.
You can see it in action [here](http://littlefs.geky.net/demo.html).
[mklfs](https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src) -
A command line tool built by the [Lua RTOS](https://github.com/whitecatboard/Lua-RTOS-ESP32)
guys for making littlefs images from a host PC. Supports Windows, Mac OS,
and Linux.
[SPIFFS](https://github.com/pellepl/spiffs) - Another excellent embedded
filesystem for NOR flash. As a more traditional logging filesystem with full
static wear-leveling, SPIFFS will likely outperform littlefs on small
memories such as the internal flash on microcontrollers.
[Dhara](https://github.com/dlbeer/dhara) - An interesting NAND flash
translation layer designed for small MCUs. It offers static wear-leveling and
power-resilience with only a fixed O(|address|) pointer structure stored on
each block and in RAM.
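
The error convention described in the README hunk above shows up in every call of the public API. As a minimal sketch of what handling those negative codes looks like (modeled on the boot-counter example littlefs ships in its README; the `lfs_t` handle and the block-device-backed `struct lfs_config` are assumed to be set up elsewhere and are not part of this diff):

```c
#include "lfs.h"

// Minimal sketch of the negative-error-code convention. Assumes `lfs` and
// a fully populated block device configuration `cfg` exist elsewhere.
int update_boot_count(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err < 0) {
        // either an enum lfs_error value or an error forwarded from the
        // user's read/prog/erase/sync callbacks
        return err;
    }

    lfs_file_t file;
    err = lfs_file_open(lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
    if (err < 0) {
        lfs_unmount(lfs);
        return err;
    }

    uint32_t count = 0;
    lfs_file_read(lfs, &file, &count, sizeof(count));
    count += 1;
    lfs_file_rewind(lfs, &file);
    lfs_file_write(lfs, &file, &count, sizeof(count));

    // changes are not committed until the file is synced or closed
    err = lfs_file_close(lfs, &file);
    lfs_unmount(lfs);
    return err;
}
```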


@@ -47,19 +47,24 @@ int lfs_emubd_create(const struct lfs_config *cfg, const char *path) {
    // Load stats to continue incrementing
    snprintf(emu->child, LFS_NAME_MAX, "stats");
    FILE *f = fopen(emu->path, "r");
-   if (!f) {
+   if (!f && errno != ENOENT) {
        return -errno;
    }

+   if (errno == ENOENT) {
+       memset(&emu->stats, 0x0, sizeof(emu->stats));
+   } else {
        size_t res = fread(&emu->stats, sizeof(emu->stats), 1, f);
        if (res < 1) {
            return -errno;
        }

        err = fclose(f);
        if (err) {
            return -errno;
        }
+   }

    return 0;


@@ -888,7 +888,7 @@ nextname:
        }

        // check that entry has not been moved
-       if (entry->d.type & 0x80) {
+       if (!lfs->moving && entry->d.type & 0x80) {
            int moved = lfs_moved(lfs, &entry->d.u);
            if (moved < 0 || moved) {
                return (moved < 0) ? moved : LFS_ERR_NOENT;
@@ -924,7 +924,7 @@ int lfs_mkdir(lfs_t *lfs, const char *path) {
    // build up new directory
    lfs_alloc_ack(lfs);

-   lfs_dir_t dir = { 0 };
+   lfs_dir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
@@ -1373,7 +1373,10 @@ int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
    }

    // zero to avoid information leak
-   lfs_cache_zero(lfs, &file->cache);
+   lfs_cache_drop(lfs, &file->cache);
+   if ((file->flags & 3) != LFS_O_RDONLY) {
+       lfs_cache_zero(lfs, &file->cache);
+   }

    // add to list of files
    file->next = lfs->files;
@@ -1641,6 +1644,11 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
        file->pos = file->size;
    }

+   if (file->pos + size > LFS_FILE_MAX) {
+       // larger than file limit?
+       return LFS_ERR_FBIG;
+   }
+
    if (!(file->flags & LFS_F_WRITING) && file->pos > file->size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
@@ -1727,24 +1735,24 @@ lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
        return err;
    }

-   // update pos
+   // find new pos
+   lfs_soff_t npos = file->pos;
    if (whence == LFS_SEEK_SET) {
-       file->pos = off;
+       npos = off;
    } else if (whence == LFS_SEEK_CUR) {
-       if (off < 0 && (lfs_off_t)-off > file->pos) {
-           return LFS_ERR_INVAL;
-       }
-       file->pos = file->pos + off;
+       npos = file->pos + off;
    } else if (whence == LFS_SEEK_END) {
-       if (off < 0 && (lfs_off_t)-off > file->size) {
-           return LFS_ERR_INVAL;
-       }
-       file->pos = file->size + off;
+       npos = file->size + off;
    }

-   return file->pos;
+   if (npos < 0 || npos > LFS_FILE_MAX) {
+       // file position out of range
+       return LFS_ERR_INVAL;
+   }
+
+   // update pos
+   file->pos = npos;
+   return npos;
}

int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
@@ -1919,7 +1927,14 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // find old entry
    lfs_dir_t oldcwd;
    lfs_entry_t oldentry;
-   int err = lfs_dir_find(lfs, &oldcwd, &oldentry, &oldpath);
+   int err = lfs_dir_find(lfs, &oldcwd, &oldentry, &(const char *){oldpath});
+   if (err) {
+       return err;
+   }
+
+   // mark as moving
+   oldentry.d.type |= 0x80;
+   err = lfs_dir_update(lfs, &oldcwd, &oldentry, NULL);
    if (err) {
        return err;
    }
@@ -1932,11 +1947,9 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
        return err;
    }

-   bool prevexists = (err != LFS_ERR_NOENT);
-   bool samepair = (lfs_paircmp(oldcwd.pair, newcwd.pair) == 0);
-
    // must have same type
-   if (prevexists && preventry.d.type != oldentry.d.type) {
+   bool prevexists = (err != LFS_ERR_NOENT);
+   if (prevexists && preventry.d.type != (0x7f & oldentry.d.type)) {
        return LFS_ERR_ISDIR;
    }
@@ -1953,18 +1966,6 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
        }
    }

-   // mark as moving
-   oldentry.d.type |= 0x80;
-   err = lfs_dir_update(lfs, &oldcwd, &oldentry, NULL);
-   if (err) {
-       return err;
-   }
-
-   // update pair if newcwd == oldcwd
-   if (samepair) {
-       newcwd = oldcwd;
-   }
-
    // move to new location
    lfs_entry_t newentry = preventry;
    newentry.d = oldentry.d;
@@ -1983,10 +1984,13 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
        }
    }

-   // update pair if newcwd == oldcwd
-   if (samepair) {
-       oldcwd = newcwd;
-   }
+   // fetch old pair again in case dir block changed
+   lfs->moving = true;
+   err = lfs_dir_find(lfs, &oldcwd, &oldentry, &oldpath);
+   if (err) {
+       return err;
+   }
+   lfs->moving = false;

    // remove old entry
    err = lfs_dir_remove(lfs, &oldcwd, &oldentry);
@@ -2055,8 +2059,8 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    }

    // zero to avoid information leaks
-   lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);
+   lfs_cache_drop(lfs, &lfs->rcache);

    // setup lookahead, round down to nearest 32-bits
    LFS_ASSERT(lfs->cfg->lookahead % 32 == 0);
@@ -2084,6 +2088,7 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->files = NULL;
    lfs->dirs = NULL;
    lfs->deorphaned = false;
+   lfs->moving = false;

    return 0;
@@ -2093,83 +2098,86 @@ cleanup:
}

int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
-   int err = lfs_init(lfs, cfg);
-   if (err) {
-       return err;
-   }
+   int err = 0;
+   if (true) {
+       err = lfs_init(lfs, cfg);
+       if (err) {
+           return err;
+       }

        // create free lookahead
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
        lfs->free.off = 0;
        lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->cfg->block_count);
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // create superblock dir
-       lfs_dir_t superdir = { 0 };
+       lfs_dir_t superdir;
        err = lfs_dir_alloc(lfs, &superdir);
        if (err) {
            goto cleanup;
        }

        // write root directory
        lfs_dir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        lfs->root[0] = root.pair[0];
        lfs->root[1] = root.pair[1];

        // write superblocks
        lfs_superblock_t superblock = {
            .off = sizeof(superdir.d),
            .d.type = LFS_TYPE_SUPERBLOCK,
            .d.elen = sizeof(superblock.d) - sizeof(superblock.d.magic) - 4,
            .d.nlen = sizeof(superblock.d.magic),
            .d.version = LFS_DISK_VERSION,
            .d.magic = {"littlefs"},
            .d.block_size = lfs->cfg->block_size,
            .d.block_count = lfs->cfg->block_count,
            .d.root = {lfs->root[0], lfs->root[1]},
        };
        superdir.d.tail[0] = root.pair[0];
        superdir.d.tail[1] = root.pair[1];
        superdir.d.size = sizeof(superdir.d) + sizeof(superblock.d) + 4;

        // write both pairs to be safe
        lfs_superblock_tole32(&superblock.d);
        bool valid = false;
        for (int i = 0; i < 2; i++) {
            err = lfs_dir_commit(lfs, &superdir, (struct lfs_region[]){
                    {sizeof(superdir.d), sizeof(superblock.d),
                     &superblock.d, sizeof(superblock.d)}
                }, 1);
            if (err && err != LFS_ERR_CORRUPT) {
                goto cleanup;
            }

            valid = valid || !err;
        }

        if (!valid) {
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &superdir, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        lfs_alloc_ack(lfs);
+   }

cleanup:
    lfs_deinit(lfs);
@@ -2177,68 +2185,59 @@ cleanup:
}

int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
-   int err = lfs_init(lfs, cfg);
-   if (err) {
-       return err;
-   }
+   int err = 0;
+   if (true) {
+       err = lfs_init(lfs, cfg);
+       if (err) {
+           return err;
+       }

        // setup free lookahead
        lfs->free.off = 0;
        lfs->free.size = 0;
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // load superblock
        lfs_dir_t dir;
        lfs_superblock_t superblock;
        err = lfs_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            goto cleanup;
        }

        if (!err) {
            err = lfs_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs_superblock_fromle32(&superblock.d);
            if (err) {
-               return err;
+               goto cleanup;
            }

            lfs->root[0] = superblock.d.root[0];
            lfs->root[1] = superblock.d.root[1];
-           if (lfs_paircmp(lfs->root, dir.d.tail) != 0) {
-               LFS_ERROR("Invalid superblock at %d %d", 0, 1);
-               err = LFS_ERR_CORRUPT;
-               goto cleanup;
-           }
        }

        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at %d %d", 0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        if ((major_version != LFS_DISK_VERSION_MAJOR ||
             minor_version > LFS_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version %d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

-       // verify that no metadata pairs are corrupt
-       while (!lfs_pairisnull(dir.d.tail)) {
-           err = lfs_dir_fetch(lfs, &dir, dir.d.tail);
-           if (err) {
-               goto cleanup;
-           }
-       }
-
-       // succuessfully mounted
        return 0;
+   }

cleanup:
    lfs_deinit(lfs);
    return err;
}
@@ -2481,10 +2480,14 @@ int lfs_deorphan(lfs_t *lfs) {
    }

    lfs_dir_t pdir = {.d.size = 0x80000000};
-   lfs_dir_t cwd = {.d.tail = {0,1}};
+   lfs_dir_t cwd = {.d.tail[0] = 0, .d.tail[1] = 1};

    // iterate over all directory directory entries
-   while (!lfs_pairisnull(cwd.d.tail)) {
+   for (lfs_size_t i = 0; i < lfs->cfg->block_count; i++) {
+       if (lfs_pairisnull(cwd.d.tail)) {
+           return 0;
+       }
+
        int err = lfs_dir_fetch(lfs, &cwd, cwd.d.tail);
        if (err) {
            return err;
@@ -2513,7 +2516,7 @@ int lfs_deorphan(lfs_t *lfs) {
                    return err;
                }

-               break;
+               return 0;
            }

            if (!lfs_pairsync(entry.d.u.dir, pdir.d.tail)) {
@@ -2529,7 +2532,7 @@ int lfs_deorphan(lfs_t *lfs) {
                    return err;
                }

-               break;
+               return 0;
            }
        }
@@ -2574,5 +2577,7 @@ int lfs_deorphan(lfs_t *lfs) {
        memcpy(&pdir, &cwd, sizeof(pdir));
    }

-   return 0;
+   // If we reached here, we have more directory pairs than blocks in the
+   // filesystem... So something must be horribly wrong
+   return LFS_ERR_CORRUPT;
}
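
The seek and write hunks above add explicit bounds on the file position. A small illustrative sketch of how they surface to a caller (this is not part of the diff; it assumes `lfs` is already mounted and `file` was opened with LFS_O_RDWR elsewhere):

```c
#include "lfs.h"

// Illustrative only: `lfs` is assumed mounted and `file` opened for writing.
int probe_file_limits(lfs_t *lfs, lfs_file_t *file) {
    // A negative absolute position is now rejected by the npos range check
    // instead of silently wrapping the unsigned offset.
    lfs_soff_t pos = lfs_file_seek(lfs, file, -1, LFS_SEEK_SET);
    if (pos != LFS_ERR_INVAL) {
        return -1;
    }

    // Seeking exactly to LFS_FILE_MAX is still allowed...
    pos = lfs_file_seek(lfs, file, LFS_FILE_MAX, LFS_SEEK_SET);
    if (pos < 0) {
        return (int)pos;
    }

    // ...but any write that would push the file past LFS_FILE_MAX now
    // fails with the new LFS_ERR_FBIG error code.
    uint8_t byte = 0xff;
    lfs_ssize_t res = lfs_file_write(lfs, file, &byte, 1);
    if (res != LFS_ERR_FBIG) {
        return -1;
    }

    return 0;
}
```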


@@ -21,7 +21,7 @@ extern "C"
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00010006
+#define LFS_VERSION 0x00010007
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
@@ -49,6 +49,11 @@ typedef uint32_t lfs_block_t;
#define LFS_NAME_MAX 255
#endif

+// Max file size in bytes
+#ifndef LFS_FILE_MAX
+#define LFS_FILE_MAX 2147483647
+#endif
+
// Possible error codes, these are negative to allow
// valid positive return values
enum lfs_error {
@@ -61,6 +66,7 @@ enum lfs_error {
    LFS_ERR_ISDIR       = -21,  // Entry is a dir
    LFS_ERR_NOTEMPTY    = -39,  // Dir is not empty
    LFS_ERR_BADF        = -9,   // Bad file number
+   LFS_ERR_FBIG        = -27,  // File too large
    LFS_ERR_INVAL       = -22,  // Invalid parameter
    LFS_ERR_NOSPC       = -28,  // No space left on device
    LFS_ERR_NOMEM       = -12,  // No more memory available
@@ -280,6 +286,7 @@ typedef struct lfs {
    lfs_free_t free;

    bool deorphaned;
+   bool moving;
} lfs_t;
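
Both the release script in .travis.yml and the disk-version check in lfs_mount rely on the 16-bit major/minor split encoded by these macros. A tiny sketch of reading them (assumes lfs.h is on the include path; not part of the diff):

```c
#include <stdio.h>
#include "lfs.h"

// With LFS_VERSION 0x00010007 the first line prints "software 1.7".
int main(void) {
    printf("software %u.%u\n",
            (unsigned)LFS_VERSION_MAJOR, (unsigned)LFS_VERSION_MINOR);
    printf("on-disk  %u.%u\n",
            (unsigned)LFS_DISK_VERSION_MAJOR, (unsigned)LFS_DISK_VERSION_MINOR);
    return 0;
}
```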


@@ -0,0 +1,61 @@
#!/usr/bin/env python
# This script replaces prefixes of files, and symbols in that file.
# Useful for creating different versions of the codebase that don't
# conflict at compile time.
#
# example:
# $ ./scripts/prefix.py lfs2
import os
import os.path
import re
import glob
import itertools
import tempfile
import shutil
import subprocess
DEFAULT_PREFIX = "lfs"
def subn(from_prefix, to_prefix, name):
name, count1 = re.subn('\\b'+from_prefix, to_prefix, name)
name, count2 = re.subn('\\b'+from_prefix.upper(), to_prefix.upper(), name)
name, count3 = re.subn('\\B-D'+from_prefix.upper(),
'-D'+to_prefix.upper(), name)
return name, count1+count2+count3
def main(from_prefix, to_prefix=None, files=None):
if not to_prefix:
from_prefix, to_prefix = DEFAULT_PREFIX, from_prefix
if not files:
files = subprocess.check_output([
'git', 'ls-tree', '-r', '--name-only', 'HEAD']).split()
for oldname in files:
# Rename any matching file names
newname, namecount = subn(from_prefix, to_prefix, oldname)
if namecount:
subprocess.check_call(['git', 'mv', oldname, newname])
# Rename any prefixes in file
count = 0
with open(newname+'~', 'w') as tempf:
with open(newname) as newf:
for line in newf:
line, n = subn(from_prefix, to_prefix, line)
count += n
tempf.write(line)
shutil.copystat(newname, newname+'~')
os.rename(newname+'~', newname)
subprocess.check_call(['git', 'add', newname])
# Summary
print '%s: %d replacements' % (
'%s -> %s' % (oldname, newname) if namecount else oldname,
count)
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))


@@ -32,18 +32,18 @@ lfs_alloc_singleproc() {
tests/test.py << TEST
    const char *names[] = {"bacon", "eggs", "pancakes"};
    lfs_mount(&lfs, &cfg) => 0;
-   for (int n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
+   for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
        sprintf((char*)buffer, "$1/%s", names[n]);
        lfs_file_open(&lfs, &file[n], (char*)buffer,
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
    }
-   for (int n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
+   for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
        size = strlen(names[n]);
        for (int i = 0; i < $SIZE; i++) {
            lfs_file_write(&lfs, &file[n], names[n], size) => size;
        }
    }
-   for (int n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
+   for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
        lfs_file_close(&lfs, &file[n]) => 0;
    }
    lfs_unmount(&lfs) => 0;


@@ -326,13 +326,42 @@ tests/test.py << TEST
    lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block rename ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "cactus/test%d", i);
sprintf((char*)wbuffer, "cactus/tedd%d", i);
lfs_rename(&lfs, (char*)buffer, (char*)wbuffer) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir[0], "cactus") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "tedd%d", i);
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, (char*)buffer) => 0;
info.type => LFS_TYPE_DIR;
}
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove ---" echo "--- Multi-block remove ---"
tests/test.py << TEST tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "cactus") => LFS_ERR_NOTEMPTY; lfs_remove(&lfs, "cactus") => LFS_ERR_NOTEMPTY;
for (int i = 0; i < $LARGESIZE; i++) { for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "cactus/test%d", i); sprintf((char*)buffer, "cactus/tedd%d", i);
lfs_remove(&lfs, (char*)buffer) => 0; lfs_remove(&lfs, (char*)buffer) => 0;
} }
@@ -391,13 +420,43 @@ tests/test.py << TEST
    lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block rename with files ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "prickly-pear/test%d", i);
sprintf((char*)wbuffer, "prickly-pear/tedd%d", i);
lfs_rename(&lfs, (char*)buffer, (char*)wbuffer) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir[0], "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "tedd%d", i);
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, (char*)buffer) => 0;
info.type => LFS_TYPE_REG;
info.size => 6;
}
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove with files ---" echo "--- Multi-block remove with files ---"
tests/test.py << TEST tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY; lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
for (int i = 0; i < $LARGESIZE; i++) { for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "prickly-pear/test%d", i); sprintf((char*)buffer, "prickly-pear/tedd%d", i);
lfs_remove(&lfs, (char*)buffer) => 0; lfs_remove(&lfs, (char*)buffer) => 0;
} }


@@ -301,7 +301,7 @@ tests/test.py << TEST
    size = strlen("hedgehoghog");
    const lfs_soff_t offsets[] = {512, 1020, 513, 1021, 511, 1019};

-   for (int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
+   for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
        lfs_soff_t off = offsets[i];
        memcpy(buffer, "hedgehoghog", size);
        lfs_file_seek(&lfs, &file[0], off, LFS_SEEK_SET) => off;

@@ -23,14 +23,14 @@ tests/test.py << TEST
    lfs_mount(&lfs, &cfg) => 0;
-   for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
+   for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
        sprintf((char*)buffer, "hairyhead%d", i);
        lfs_file_open(&lfs, &file[0], (const char*)buffer,
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

        strcpy((char*)buffer, "hair");
        size = strlen((char*)buffer);
-       for (int j = 0; j < startsizes[i]; j += size) {
+       for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
            lfs_file_write(&lfs, &file[0], buffer, size) => size;
        }

        lfs_file_size(&lfs, &file[0]) => startsizes[i];
@@ -55,13 +55,13 @@ tests/test.py << TEST
    lfs_mount(&lfs, &cfg) => 0;
-   for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
+   for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
        sprintf((char*)buffer, "hairyhead%d", i);
        lfs_file_open(&lfs, &file[0], (const char*)buffer, LFS_O_RDWR) => 0;
        lfs_file_size(&lfs, &file[0]) => hotsizes[i];

        size = strlen("hair");
-       int j = 0;
+       lfs_off_t j = 0;
        for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
            lfs_file_read(&lfs, &file[0], buffer, size) => size;
            memcmp(buffer, "hair", size) => 0;
@@ -87,13 +87,13 @@ tests/test.py << TEST
    lfs_mount(&lfs, &cfg) => 0;
-   for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
+   for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
        sprintf((char*)buffer, "hairyhead%d", i);
        lfs_file_open(&lfs, &file[0], (const char*)buffer, LFS_O_RDONLY) => 0;
        lfs_file_size(&lfs, &file[0]) => coldsizes[i];

        size = strlen("hair");
-       int j = 0;
+       lfs_off_t j = 0;
        for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
                j += size) {
            lfs_file_read(&lfs, &file[0], buffer, size) => size;