Multilingual dictionaries and keyboard layouts

This commit is contained in:
2026-03-06 04:49:51 +00:00
parent f20fa6110d
commit 895e04d6ce
70 changed files with 195109 additions and 1569 deletions

1
Cargo.lock generated
View File

@@ -1226,6 +1226,7 @@ dependencies = [
"criterion",
"crossterm 0.28.1",
"dirs",
"icu_normalizer",
"rand",
"ratatui",
"reqwest",

View File

@@ -19,6 +19,7 @@ chrono = { version = "0.4", features = ["serde"] }
anyhow = "1.0"
thiserror = "2.0"
reqwest = { version = "0.12", features = ["blocking"], optional = true }
icu_normalizer = { version = "2.1", default-features = false, features = ["compiled_data"] }
[dev-dependencies]
tempfile = "3"

View File

@@ -8,10 +8,15 @@
- Upstream license: GNU Affero General Public License v3.0
- Local upstream license copy (for local research clone): `clones/keybr.com/LICENSE`
1. `assets/words-en.json`
- Source: `clones/keybr.com/packages/keybr-content-words/lib/data/words-en.json`
- Status: included in this repository and used at runtime by `src/generator/dictionary.rs`
1. `assets/dictionaries/words-*.json` (seeded Latin-script set)
- Sources: `clones/keybr.com/packages/keybr-content-words/lib/data/words-<lang>.json`
- Included language keys: `en, de, es, fr, it, pt, nl, sv, da, nb, fi, pl, cs, ro, hr, hu, lt, lv, sl, et, tr`
- Status: included in this repository and available to `src/generator/dictionary.rs`
- Modifications: none (byte-identical at the time of import)
- Integrity metadata:
- `assets/dictionaries/manifest.tsv` (language/file/source mapping)
- `assets/dictionaries/SHA256SUMS` (checksum manifest)
- `assets/dictionaries/words-<lang>.json.license` (per-file provenance/license sidecar)
## Local research clones (not committed to this repository)
@@ -55,7 +60,7 @@ architecture/algorithm ideas:
- keybr-code
For these references, no direct code/data inclusion is claimed in this repository
except the explicitly documented `assets/words-en.json` import from keybr.com.
except the explicitly documented `assets/dictionaries/words-*.json` imports from keybr.com.
## Notes

View File

@@ -0,0 +1,21 @@
30a78612b478f8f9101e200b96ddf2807720a2b513ec6d05a73abdde99354407 words-cs.json
8098e39c9deb00db59d85f82c9bc791536b51c8fa2a5b688f771f120e83bbc26 words-da.json
014d7ff2f7756b1a0775b975e325bf75076770f0d4e6f9ebed771fa6aacb7ed5 words-de.json
067adf66de5f0a7ca17f3bf187bab378d8ad71e87856e4a25a208905404b949a words-en.json
fffcb910f0012e62215bfa2a8ed34ecc3d54cbf04a658c3bce5bee8148abf634 words-es.json
bfd0d22dbc129c3d693d5afbf39aaa5506c0c723bf5bb51ef10edd2af3f1c71d words-et.json
2530c4a37311fb93d6f687edb08534eca71f4c775e1a01fca405c783361386fa words-fi.json
3b177fcca8f275cce555ac954fcbbb945b14626a8a235993ff9e9d9767005517 words-fr.json
f439f8bf16f65f8600642906a0967dc1f99992f6a2f3b830bd77554ccb6a07de words-hr.json
44ec5436364a162dc7774be3c40a4678247aa2909eaceefac7f49b3bc00811f5 words-hu.json
03361069ce40d08fa931709ce402811d0f484c32d03878706ad4dcc5e709b01a words-it.json
9239f4042d67127859b3a56da29a6f3df4cd458776483adf561b668f3e646579 words-lt.json
ad24ebd9a36c012ebb8db3db78af5e6038d26b64b3f173885c4a606ab17d3d49 words-lv.json
be83a2cff75097db957575425b4dec658006c9b9e43fdcb7a6eb92701818b752 words-nb.json
0f701c9e5c891dd557a0f4f3a6903b4c9762a2a898f749caccb59efbee189271 words-nl.json
d99e00fb85890847ba783354e148ef835d44faee95c4d7ec227d589cf5b072d3 words-pl.json
fa3009988d7be559a78b6b2c2198628750de77d77e0ee360d8bf5cc8eab84368 words-pt.json
76ec930a9b6aaa8092f2179b0918d71ec61f139843d985f5f25eae07bd7093fc words-ro.json
2960c6db414abb22505a4f78d8292df2b45d7332144302296055fc5a8ee07e23 words-sl.json
154e1b905d10130fee0160d3e2f30bd6445e8da1e3251475df37be364a81bd17 words-sv.json
95f6e867ef64d6a1ddd90f82d574d38b4a2be19d550f613ee87fc3e1701a0d8e words-tr.json

View File

@@ -0,0 +1,22 @@
# language_key file license_file source
en words-en.json words-en.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-en.json
de words-de.json words-de.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-de.json
es words-es.json words-es.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-es.json
fr words-fr.json words-fr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-fr.json
it words-it.json words-it.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-it.json
pt words-pt.json words-pt.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-pt.json
nl words-nl.json words-nl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-nl.json
sv words-sv.json words-sv.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-sv.json
da words-da.json words-da.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-da.json
nb words-nb.json words-nb.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-nb.json
fi words-fi.json words-fi.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-fi.json
pl words-pl.json words-pl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-pl.json
cs words-cs.json words-cs.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-cs.json
ro words-ro.json words-ro.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-ro.json
hr words-hr.json words-hr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-hr.json
hu words-hu.json words-hu.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-hu.json
lt words-lt.json words-lt.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-lt.json
lv words-lv.json words-lv.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-lv.json
sl words-sl.json words-sl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-sl.json
et words-et.json words-et.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-et.json
tr words-tr.json words-tr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-tr.json
1 # language_key file license_file source
2 en words-en.json words-en.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-en.json
3 de words-de.json words-de.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-de.json
4 es words-es.json words-es.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-es.json
5 fr words-fr.json words-fr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-fr.json
6 it words-it.json words-it.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-it.json
7 pt words-pt.json words-pt.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-pt.json
8 nl words-nl.json words-nl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-nl.json
9 sv words-sv.json words-sv.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-sv.json
10 da words-da.json words-da.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-da.json
11 nb words-nb.json words-nb.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-nb.json
12 fi words-fi.json words-fi.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-fi.json
13 pl words-pl.json words-pl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-pl.json
14 cs words-cs.json words-cs.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-cs.json
15 ro words-ro.json words-ro.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-ro.json
16 hr words-hr.json words-hr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-hr.json
17 hu words-hu.json words-hu.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-hu.json
18 lt words-lt.json words-lt.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-lt.json
19 lv words-lv.json words-lv.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-lv.json
20 sl words-sl.json words-sl.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-sl.json
21 et words-et.json words-et.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-et.json
22 tr words-tr.json words-tr.json.license clones/keybr.com/packages/keybr-content-words/lib/data/words-tr.json

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-cs.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-da.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-de.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-es.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-et.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-fi.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-fr.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-hr.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-hu.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-it.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-lt.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-lv.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-nb.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-nl.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-pl.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-pt.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-ro.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-sl.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-sv.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
This file is sourced from keybr.com:
clones/keybr.com/packages/keybr-content-words/lib/data/words-tr.json
Upstream project: https://github.com/aradzie/keybr.com
Upstream license: GNU Affero General Public License v3.0
Local project license: AGPL-3.0-only (see /LICENSE).

View File

@@ -5,9 +5,19 @@ This repository includes AGPL-licensed upstream material and is licensed as
## What is included in-repo
- `assets/words-en.json` is imported from keybr.com and tracked in
- `assets/dictionaries/words-*.json` are imported from keybr.com and tracked in
`THIRD_PARTY_NOTICES.md`.
- `assets/words-en.json.license` records source and license for the imported file.
- `assets/dictionaries/words-<lang>.json.license` records source and license for
each imported dictionary file.
- `assets/dictionaries/manifest.tsv` maps language keys to imported files/sources.
- `assets/dictionaries/SHA256SUMS` stores dictionary checksums for integrity verification.
- `scripts/validate_dictionary_manifest.sh` validates manifest entries, sidecars,
and checksums.
- `scripts/derive_primary_letter_sequences.py` derives per-language primary-letter
sequence seed data from dictionary frequency.
- `assets/dictionaries/primary-letter-sequences.tsv` stores the current derived output.
- `docs/unicode-normalization-policy.md` documents NFC normalization policy and
equivalence expectations.
## What is research-only

View File

@@ -0,0 +1,572 @@
# keydr Multilingual Dictionary + Keyboard Layout Internationalization Plan
## Context
We currently use an English-only dictionary and an ASCII-centric adaptive model:
- Dictionary is hardcoded to `assets/words-en.json` in `src/generator/dictionary.rs`.
- Dictionary ingestion filters to ASCII lowercase only (`is_ascii_lowercase`).
- Transition table building (`src/generator/transition_table.rs`) skips non-ASCII words.
- Adaptive drill generation in `src/app.rs` builds lowercase filter from `is_ascii_lowercase`.
- Skill tree lowercase branch is fixed to English `a-z` frequency in `src/engine/skill_tree.rs`.
- Keyboard rendering/hit-testing logic has hardcoded row offsets and row count assumptions in `src/ui/components/keyboard_diagram.rs` and `src/ui/components/stats_dashboard.rs`.
## Explicit product decision: clean break
This app is currently work-in-progress and has no real user base. We explicitly do
not need to preserve old config/state/export compatibility for this change. If data
must be recreated from scratch, that is acceptable.
## Goals
1. Add user-selectable dictionary language (default `en`) using keybr-provided dictionary files.
2. Add user-selectable keyboard layout profiles for multiple languages.
3. Ensure keyboard visualizations, explorer, and stats heatmaps render correctly for variable row shapes and non-English keycaps.
4. Use a clean-break implementation with no backward-compatibility requirements.
5. Maintain license compliance for newly imported dictionaries.
## Non-goals (first delivery)
1. Full IME/dead-key composition support.
2. Full rewrite of adaptive model for every script from day one.
3. Perfect locale-specific pedagogy for all languages in phase 1.
4. Backward compatibility for old config/profile/export data.
## Execution constraints (must be explicit before implementation)
1. **Unicode normalization policy:** Use NFC as canonical storage/matching form for dictionary ingestion, generated text, keystroke comparison, and persisted stats keys. Do not use NFKC in phase 1 to avoid compatibility-fold surprises.
2. **Character equivalence policy:** Equality is by normalized scalar sequence (NFC), not by glyph appearance. Composed/decomposed equivalents must compare equal after normalization.
3. **Clean-break schema cutover policy:** This rollout uses hard reset semantics for old unscoped stats/profile files. On first run of the new schema version, old files are ignored (optionally archived with `.legacy` suffix); no partial migration path.
4. **Capability gating policy:** Only language/layout pairs marked supported in the registry capability matrix are selectable in UI during phased rollout.
5. **Performance envelope policy:** Keyboard geometry recomputation must be bounded and cached by profile key + render mode + viewport size.
## Upstream data availability
`keybr-content-words` includes dictionaries for:
`ar, be, cs, da, de, el, en, es, et, fa, fi, fr, he, hr, hu, it, ja, lt, lv, nb, nl, pl, pt, ro, ru, sl, sv, th, tr, uk`
Recommended rollout strategy:
- Initial support for Latin-script languages first (`en, de, es, fr, it, pt, nl, sv, da, nb, fi, pl, cs, ro, hr, hu, lt, lv, sl, et, tr`).
- Later support for non-Latin scripts (`el, ru, uk, be, ar, fa, he, ja, th`) after script-specific input/model behavior is in place.
---
## Key Architectural Decisions
### 1) Language Pack registry
Add a registry module (e.g. `src/l10n/language_pack.rs`) containing:
- `language_key`
- `display_name`
- `script`
- `dictionary_asset_id`
- `supported_keyboard_layout_keys`
- `primary_letter_sequence` (for ranked progression)
- `starter_weights` and optional `vowel_set` for generator fallback behavior
- `support_level` (`full`, `experimental`, `blocked`)
- `normalization_form` (phase 1 fixed to `NFC`)
- `input_capabilities` (for example `direct_letters_only`, `needs_ime`)
This becomes the single source of truth for language behavior.
### 2) Runtime dictionary/generator rebuild is required
Changing `dictionary_language` must immediately take effect without restart.
Implement `App::rebuild_language_assets(&mut self)` that rebuilds:
- `Dictionary`
- `TransitionTable`
- any cached generator state derived from language assets
- focused-character transforms derived from language rules
- drill-generation allowlists that depend on language pack data
Call it whenever language or language-dependent layout changes in settings.
`rebuild_language_assets` must also refresh capitalization/case behavior inputs used by adaptive generation.
`rebuild_language_assets` invalidation contract (required):
- always invalidate and rebuild `Dictionary` and `TransitionTable`
- clear adaptive cross-drill dictionary history cache
- clear/refresh any cached language-specific focus mapping
- do **not** mutate in-progress drill text
- all newly generated drills after rebuild must use new language assets
### 3) Asset loading strategy: compile-time embedded assets
For Phase 1 scope, dictionaries will be embedded at compile-time (generated asset map + `include_str!`/equivalent), not runtime file discovery.
Rationale:
- deterministic packaging
- no runtime path resolution complexity
- simpler cross-platform behavior
Tradeoff: larger binary size, acceptable for this phase.
### 4) Transition table fallback strategy
`TransitionTable::build_english()` will be gated to `language_key == "en"` only.
For non-English languages:
- use dictionary-derived transition table only
- if sparse, degrade gracefully to simple dictionary sampling behavior rather than English fallback model
### 5) Keyboard geometry refactor strategy
`src/ui/components/keyboard_diagram.rs` is a substantial refactor (all render and hit-test paths).
Implement shared `KeyboardGeometry` computed once per render context and consumed by:
- compact/full/fallback renderers
- all key hit-testing paths
- shift hit-testing paths
No duplicate hardcoded offsets should remain.
Performance constraints for geometry:
- geometry cache key: `(layout_key, render_mode, viewport_width, viewport_height)`
- recompute only when cache key changes
- hit-testing must be O(number_of_keys) or better per event with no per-key allocation
- include a benchmark/smoke check to detect regressions in repeated render/hit-test loops
### 6) Finger assignment source of truth
Finger assignment must be profile metadata, not inferred by QWERTY column heuristics.
Each keyboard profile defines finger mapping for each physical key position.
### 7) Stats isolation strategy
Stats are language-scoped and layout-scoped.
Adopt per-scope storage files (for example):
- `key_stats_<language>_<layout>.json`
- `key_stats_ranked_<language>_<layout>.json`
- optional scoped drill history files
No mixed-language key stats in a single store.
Profile/scoring scoping policy:
- `skill_tree` progress is language-scoped (at minimum by `language_key`).
- `total_score`, `total_drills`, `streak_days`, and `best_streak` remain global.
- `ProfileData` will separate global fields from language-scoped progression state.
Scoped-file discovery mechanism:
- registry-driven + current-config driven only
- app loads current scope directly and only enumerates scopes from supported language/layout registry pairs
- no unconstrained glob-based discovery of arbitrary stale files
Import/export strategy for scoped stats:
- export bundles all supported scoped stats files present in the data dir
- each bundle entry includes explicit `language_key` and `layout_key` metadata
- import applies two-phase commit per scoped target file
- export/import also includes language-scoped `skill_tree` progress entries with `language_key` metadata
Atomicity requirements for scoped import:
- stage writes to `<target>.tmp`
- flush file contents (`sync_all`) before rename
- rename temp file onto target atomically where supported
- on any failure, remove temp file and keep existing target untouched
- no commit of partially imported scope bundles
### 8) Settings architecture
Current index-based settings handling is fragile.
Phase 1 includes a refactor from positional integer indices to enum/struct-based settings entries before adding multilingual controls.
Profile key validation must be registry-backed. Do not rely on `KeyboardModel::from_name()` fallback behavior.
Validation error taxonomy (typed, stable):
- `UnknownLanguage`
- `UnknownLayout`
- `UnsupportedLanguageLayoutPair`
- `LanguageBlockedBySupportLevel`
UI must show deterministic user-facing error text for each class (used by tests).
In-progress drill behavior on language/layout change:
- language/layout changes rebuild assets immediately for future generation
- current in-progress drill text is not mutated mid-drill
- new language/layout applies on the next drill generation
### 9) Unicode handling architecture
Define one shared Unicode utility module used by dictionary ingestion, generators, and input matching:
- normalize all dictionary entries to NFC at load time
- normalize typed characters before comparison against expected text
- normalize persisted per-key identifiers before write/read
- provide helper tests for composed/decomposed equivalence (for example `é` vs `e + ◌́`)
### 10) Rollout capability matrix architecture
Add a single registry-backed capability matrix keyed by `(language_key, layout_key)`:
- `enabled`: selectable and fully supported
- `preview`: selectable with warning banner
- `disabled`: visible but not selectable
Phase-gating must read this matrix in settings and selection screens; no ad-hoc checks.
---
## Phased Implementation
## Phase 0: Data + compliance groundwork
### Tasks
1. Import selected dictionaries to `assets/dictionaries/words-<lang>.json`.
2. Add sidecar license/provenance files for each imported dictionary.
3. Update `THIRD_PARTY_NOTICES.md` with imported assets.
4. Add validation script for dictionary manifest/checksums.
5. Define language pack registry seed data (including temporary `primary_letter_sequence` values).
6. Add `support_level` and capability-matrix seed entries for every language/layout pair.
7. Add a build-time utility that derives a letter-frequency sequence from each dictionary (seed data source of truth; manual overrides allowed but documented).
8. Write `docs/unicode-normalization-policy.md` (NFC/equivalence rules + examples).
### Verification
1. All imported dictionaries listed in third-party notices.
2. Sidecar license/provenance file exists for each imported dictionary.
3. Manifest validation script passes.
4. Build-time frequency derivation utility emits reproducible output for seeded languages.
5. Unicode policy doc exists and includes composed/decomposed test cases.
---
## Phase 1: Settings and configuration foundation
### Tasks
1. Add `dictionary_language` to `Config`.
2. Refactor settings implementation from raw indices to typed settings entries (enum/descriptor model).
3. Add settings controls for:
- dictionary language
- canonical keyboard layout profile key
4. Implement explicit invalid combination handling (reject with message), not silent fallback.
5. Wire language/layout change actions to `App::rebuild_language_assets(&mut self)`.
6. Introduce clean-break schema/version update for config/profile/store formats with hard-reset behavior for old files.
7. Replace `from_name` wildcard fallback paths with explicit lookup failure handling tied to registry validation.
8. Update import/export schema and transaction flow for scoped stats bundles.
9. Split profile persistence into global fields + language-scoped skill tree progress map.
10. Enforce capability-matrix gating in settings/selectors (`enabled/preview/disabled` states).
11. Add typed validation errors and stable user-facing status messages.
### Code areas
- `src/config.rs`
- `src/main.rs` (settings UI rendering and input handling)
- `src/app.rs` (settings action handlers, rebuild trigger)
- `src/store/schema.rs`
- `src/store/json_store.rs`
### Verification
1. Unit tests for config defaults/validation.
2. Unit tests for settings navigation/editing after index refactor.
3. Runtime test: changing dictionary language updates generated drills without restart.
4. Runtime test: invalid language/layout pair is rejected with visible error/status.
5. Export/import test: scoped stats for multiple language/layout pairs round-trip correctly.
6. Runtime test: changing language mid-drill preserves current drill text and applies new language on next drill.
7. Schema cutover test: old-format files are ignored/archived and never partially loaded.
8. UI test: disabled/preview capability-matrix entries render and behave correctly.
---
## Phase 2: Dictionary, transition table, and generator internationalization
### Tasks
1. Refactor `Dictionary::load(language_key)` with an embedded asset map.
2. Remove ASCII-only filtering from dictionary ingestion and transition building.
3. Extend `phonetic.rs` to remove English hardcoding:
- replace hardcoded starter biases with language-pack starter data or derived frequencies
- replace fallback `"the"` with language-aware fallback (for example: top dictionary word)
- make vowel recovery optional/parameterized by language pack
- remove `is_ascii_lowercase` focus filtering and rely on allowed-character logic
4. Implement transition fallback policy:
- `build_english()` only for English
- non-English graceful degradation path without English fallback table
5. Address adaptive and non-adaptive mode filters:
- remove hardcoded `('a'..='z')` filters in code/passage modes
- use language-pack allowed sets where applicable
6. Refactor capitalization pipeline to Unicode-aware behavior:
- replace ASCII-only case checks/conversions in `capitalize.rs`
- use Unicode case mapping and language-pack constraints
- ensure non-ASCII letters (for example `ä/Ä`, `é/É`) are handled correctly
7. Implement shared normalization utility and apply it consistently in:
- dictionary load path
- generated text comparison/matching paths
- persisted key identity paths
8. Multilingual audit checklist (required pass/fail):
- `rg -n "is_ascii" src/app.rs src/generator/*.rs` has no unreviewed hits affecting multilingual behavior
- every remaining `is_ascii*` hit has a documented justification comment or issue reference
### Code areas
- `src/generator/dictionary.rs`
- `src/generator/transition_table.rs`
- `src/generator/phonetic.rs`
- `src/generator/capitalize.rs`
- `src/app.rs` (adaptive/code/passage filter construction)
### Verification
1. Unit tests for dictionary loading per supported language.
2. Unit tests for transition table generation with non-English characters.
3. Unit tests for phonetic fallback behavior per language pack.
4. Unit tests for capitalization correctness on non-ASCII letters.
5. Regression tests for English output quality.
6. Unit tests for NFC normalization and composed/decomposed equivalence.
---
## Phase 3: Keyboard layout profile system
### Tasks
1. Replace ad-hoc constructors with a canonical keyboard profile registry.
2. Add language-relevant profiles (`de_qwertz`, `fr_azerty`, etc.).
3. Add profile metadata:
- key rows and shifted/base pairs
- geometry hints
- modifier placement metadata
- per-key finger assignments
4. Remove legacy alias layer and enforce canonical profile keys.
5. Evaluate `src/keyboard/layout.rs` usage:
- if unused, delete it
- otherwise fold it into the new profile registry without duplicate sources of truth
### Code areas
- `src/keyboard/model.rs`
- `src/keyboard/layout.rs`
- `src/keyboard/display.rs` (if locale labels/short labels need extension)
- `src/config.rs`
### Verification
1. Unit tests for all canonical profile keys.
2. Unit tests for profile completeness and unique key mapping.
3. Unit tests for finger assignment coverage/consistency.
---
## Phase 4: Keyboard visualization and hit-testing refactor
### Tasks
1. Implement shared `KeyboardGeometry` used by all keyboard rendering modes.
2. Rewrite keyboard diagram rendering paths to use shared geometry.
3. Rewrite all keyboard hit-testing paths to use shared geometry.
4. Refactor stats dashboard keyboard heatmap/timing rendering to use profile geometry metadata.
5. Ensure explorer and selection logic works for variable row counts and locale keycaps.
6. Update sentinel boundary tests if new files must reference sentinel constants.
7. Remove ASCII shift-display guards in keyboard rendering:
- replace `is_ascii_alphabetic()`-based shifted display checks
- use profile-defined shiftability (`base != shifted` or explicit shiftable set)
8. Audit and replace ASCII-specific input-handling logic in `main.rs`:
- caps-lock inference
- depressed-key normalization
- shift guidance and shifted-key detection in keyboard UI paths
9. Add geometry cache and recompute guards keyed by `(layout_key, render_mode, viewport)` with benchmark coverage.
### Code areas
- `src/ui/components/keyboard_diagram.rs`
- `src/ui/components/stats_dashboard.rs`
- `src/main.rs` keyboard explorer handlers
- `src/main.rs` input handling (`handle_key`, caps/shift logic, keyboard guidance/render helpers)
- `src/app.rs` explorer state/focus use
- `src/keyboard/display.rs` tests
### Verification
1. Snapshot/golden tests for compact/full/fallback rendering per profile.
2. Hit-test roundtrip tests per profile.
3. Manual keyboard explorer smoke tests for US + non-US profiles.
4. Sentinel boundary tests pass with updated policy.
5. Manual test: shifted rendering works for non-ASCII letter keys where profile defines shifted forms.
6. Manual test: caps/shift guidance and depressed-key behavior are correct for non-ASCII key input.
7. Benchmark/smoke test: repeated render + hit-test loops meet baseline without per-frame geometry rebuild when cache key is unchanged.
---
## Phase 5: Skill tree and ranked progression internationalization
### Tasks
1. Replace fixed English lowercase progression with language-pack `primary_letter_sequence`.
2. Replace hardcoded "lowercase as background" branch logic with language-pack primary-letter background behavior.
3. Remove UI copy that assumes a fixed 26-letter `a-z` lowercase alphabet.
4. Ensure ranked gating uses language-pack readiness (sequence + profile support).
5. Define letter-frequency derivation approach:
   - derive the initial sequence from dictionary frequency data via a build-time utility, rather than maintaining hand-curated sequences long-term
6. Milestone-copy audit checklist (required pass/fail):
- grep for hardcoded milestone language in `main.rs` (`26`, `a-z`, `A-Z`, `lowercase`)
- replace with language-pack-aware dynamic copy
- add tests asserting copy adjusts with different sequence lengths
### Code areas
- `src/engine/skill_tree.rs`
- `src/app.rs` (focus/background/filter logic)
- `src/main.rs` (milestone/help copy)
### Verification
1. Tests for progression with multiple language sequences.
2. Tests for background-branch selection correctness.
3. Snapshot tests for milestone text across languages.
---
## Phase 6: UX polish, test parameterization, and rollout
### Tasks
1. Add dedicated language/layout selector screens where needed.
- Implemented in `src/main.rs` + `src/app.rs` with `DictionaryLanguageSelect` and `KeyboardLayoutSelect`.
2. Add explicit support-matrix messaging for partially supported scripts.
- Implemented in selector + settings UI copy in `src/main.rs` (`preview`/`disabled` state messaging).
3. Add parameterized test helpers:
- language-aware allowed key sets
- expected progression counts
- profile fixtures
- Implemented via cross-language/layout fixtures and property tests in `src/l10n/language_pack.rs`, `src/engine/skill_tree.rs`, and `src/ui/components/keyboard_diagram.rs`.
4. Document that Phase 2 may temporarily allow language/dictionary mismatch with keyboard visuals until Phase 3/4 is complete.
5. Add explicit note in docs that Phase 2 mismatch window is expected and resolved by Phase 4.
- Implemented in `docs/multilingual-rollout-notes.md`.
6. Add cross-language property tests:
- key uniqueness per profile
- hit-test round-trip invariants
- progression monotonicity per language sequence
- Implemented in `src/keyboard/model.rs`, `src/ui/components/keyboard_diagram.rs`, and `src/engine/skill_tree.rs`.
### Code areas
- `src/main.rs`
- `src/app.rs`
- test modules across `src/*`
- `docs/`
### Verification
1. End-to-end manual flows for language switch + layout switch + drill generation + keyboard explorer + stats.
2. Performance checks for embedded dictionary footprint and startup latency.
3. Test suite passes with parameterized language/profile cases.
4. Property/invariant tests pass for key uniqueness, hit-test round-trip, and progression monotonicity.
---
## File-by-file Impact Matrix
### Core config and app wiring
- `src/config.rs`
- add `dictionary_language` and canonical `keyboard_layout` profile key validation
- `src/app.rs`
- add `rebuild_language_assets`
- remove ASCII-only filters and audit residual ASCII assumptions (`rg is_ascii` pass)
- wire settings actions to runtime rebuild
- `src/main.rs`
- refactor settings UI to typed entries
- add/update selectors and error/status handling
- audit/replace ASCII-specific input/caps/shift handling
### Generators and adaptive engine
- `src/generator/dictionary.rs`
- dynamic, language-aware load via embedded registry
- `src/generator/transition_table.rs`
- non-ASCII support and explicit English-only fallback gating
- `src/generator/phonetic.rs`
- remove hardcoded English starter/vowel/fallback assumptions
- `src/generator/capitalize.rs`
- replace ASCII-only casing logic with Unicode-aware capitalization rules
### Skill progression
- `src/engine/skill_tree.rs`
- language-pack primary sequence
- language-pack background branch behavior
### Keyboard modeling and visualization
- `src/keyboard/model.rs`
- canonical profile registry with per-key finger mapping
- `src/keyboard/layout.rs`
- delete or fold into model registry
- `src/ui/components/keyboard_diagram.rs`
- shared geometry + full hit-test rewrite
- `src/ui/components/stats_dashboard.rs`
- geometry-driven keyboard heatmap/timing rendering
- `src/keyboard/display.rs`
- sentinel boundary test updates as needed
### Persistence/schema
- `src/store/schema.rs`
- clean-break schema/version bump as needed
- split profile data into global fields + language-scoped skill tree progress
- `src/store/json_store.rs`
- scoped stats storage by language/layout
- scoped file discovery based on supported registry pairs
- export/import scoped bundle handling with language/layout metadata
- export/import language-scoped skill tree progress entries
### Assets/compliance/docs
- `assets/dictionaries/*`
- `assets/dictionaries/*.license`
- `THIRD_PARTY_NOTICES.md`
- `docs/license-compliance.md`
- `docs/unicode-normalization-policy.md`
---
## Risks and mitigations
1. **Risk:** Non-Latin scripts break assumptions in multiple modules.
- **Mitigation:** staged rollout by script; support matrix gating.
2. **Risk:** Keyboard visualization regressions during geometry rewrite.
- **Mitigation:** shared geometry abstraction + dedicated hit-test/render tests.
3. **Risk:** Clean-break schema reset discards local data.
- **Mitigation:** explicitly documented and accepted by product decision.
4. **Risk:** Settings refactor increases short-term scope.
- **Mitigation:** do it early to avoid repeated index-cascade bugs.
5. **Risk:** Embedded dictionary set increases binary size/startup memory.
- **Mitigation:** track size/startup metrics per release and switch to hybrid packaging if thresholds are exceeded.
---
## Definition of Done
1. Language switch updates dictionary-driven generation without restart.
2. Keyboard profiles are canonical and language-aware; no legacy alias dependency.
3. Keyboard diagram, explorer, and stats views are geometry-driven and correct for supported profiles.
4. Ranked progression uses language-pack primary sequences and background logic.
5. Code/passage/adaptive modes no longer depend on hardcoded `a-z` filters.
6. Stats are isolated by language/layout scope.
7. Skill tree progression is language-scoped while streak/score totals remain global.
8. Third-party attributions and license sidecars cover all imported dictionary assets.
9. Automated tests cover runtime rebuild, generator behavior, keyboard geometry/hit-testing, progression invariants, and parameterized language/profile cases.
10. Unicode normalization policy is implemented and tested across ingestion, generation, input matching, and persisted stats keys.
11. Clean-break schema cutover behavior is deterministic (hard-reset semantics) and covered by automated tests.
12. Capability matrix gating is enforced consistently across settings/selectors and covered by UI/runtime tests.

File diff suppressed because it is too large Load Diff

View File

@@ -15,7 +15,7 @@ use keydr::store::schema::{
DrillHistoryData, EXPORT_VERSION, ExportData, KeyStatsData, ProfileData,
};
const SCHEMA_VERSION: u32 = 2;
const SCHEMA_VERSION: u32 = 3;
const TARGET_CPM: f64 = 175.0;
// ── Helpers ──────────────────────────────────────────────────────────────
@@ -271,6 +271,28 @@ fn last_practice_date_from_drills(drills: &[DrillResult]) -> Option<String> {
.map(|d| d.timestamp.format("%Y-%m-%d").to_string())
}
/// Build a `ProfileData` fixture for the generated demo profiles.
///
/// The skill-tree progress is stored twice: in the top-level `skill_tree`
/// field and in the per-language map keyed under "en" (these fixtures are
/// English-only).
fn make_profile_data(
    skill_tree: SkillTreeProgress,
    total_score: f64,
    total_drills: u32,
    streak_days: u32,
    best_streak: u32,
    last_practice_date: Option<String>,
) -> ProfileData {
    let skill_tree_by_language = HashMap::from([("en".to_string(), skill_tree.clone())]);
    ProfileData {
        schema_version: SCHEMA_VERSION,
        skill_tree,
        skill_tree_by_language,
        total_score,
        total_drills,
        streak_days,
        best_streak,
        last_practice_date,
    }
}
// ── Profile Builders ─────────────────────────────────────────────────────
fn build_profile_01() -> ExportData {
@@ -278,15 +300,7 @@ fn build_profile_01() -> ExportData {
make_skill_tree_progress(vec![(BranchId::Lowercase, BranchStatus::InProgress, 0)]);
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
skill_tree,
total_score: 0.0,
total_drills: 0,
streak_days: 0,
best_streak: 0,
last_practice_date: None,
},
make_profile_data(skill_tree, 0.0, 0, 0, 0, None),
KeyStatsStore::default(),
KeyStatsStore::default(),
Vec::new(),
@@ -340,15 +354,14 @@ fn build_profile_02() -> ExportData {
// total_score: level_from_score(x) = (x/100).sqrt() => for level 2: score ~400
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 350.0,
total_drills: 15,
streak_days: 3,
best_streak: 3,
last_practice_date: last_practice_date_from_drills(&drills),
},
350.0,
15,
3,
3,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -402,15 +415,14 @@ fn build_profile_03() -> ExportData {
// level ~3: score ~900
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 900.0,
total_drills: 50,
streak_days: 7,
best_streak: 7,
last_practice_date: last_practice_date_from_drills(&drills),
},
900.0,
50,
7,
7,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -461,15 +473,14 @@ fn build_profile_03_near_lowercase_complete() -> ExportData {
);
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 1800.0,
total_drills: 90,
streak_days: 10,
best_streak: 12,
last_practice_date: last_practice_date_from_drills(&drills),
},
1800.0,
90,
10,
12,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -516,15 +527,14 @@ fn build_profile_04() -> ExportData {
// level ~5: score ~2500
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 2500.0,
total_drills: 100,
streak_days: 14,
best_streak: 14,
last_practice_date: last_practice_date_from_drills(&drills),
},
2500.0,
100,
14,
14,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -601,15 +611,14 @@ fn build_profile_05() -> ExportData {
// level ~7: score ~5000
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 5000.0,
total_drills: 200,
streak_days: 21,
best_streak: 21,
last_practice_date: last_practice_date_from_drills(&drills),
},
5000.0,
200,
21,
21,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -695,15 +704,14 @@ fn build_profile_06() -> ExportData {
// level ~12: score ~15000
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 15000.0,
total_drills: 500,
streak_days: 45,
best_streak: 60,
last_practice_date: last_practice_date_from_drills(&drills),
},
15000.0,
500,
45,
60,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,
@@ -776,15 +784,14 @@ fn build_profile_07() -> ExportData {
// level ~18: score ~35000
make_export(
ProfileData {
schema_version: SCHEMA_VERSION,
make_profile_data(
skill_tree,
total_score: 35000.0,
total_drills: 800,
streak_days: 90,
best_streak: 90,
last_practice_date: last_practice_date_from_drills(&drills),
},
35000.0,
800,
90,
90,
last_practice_date_from_drills(&drills),
),
stats,
ranked_stats,
drills,

View File

@@ -1,6 +1,11 @@
use std::fs;
use std::path::PathBuf;
use crate::keyboard::model::KeyboardModel;
use crate::l10n::language_pack::{
LanguageLayoutValidationError, dictionary_languages_for_layout, supported_dictionary_languages,
validate_language_layout_pair,
};
use anyhow::Result;
use serde::{Deserialize, Serialize};
@@ -16,6 +21,8 @@ pub struct Config {
pub word_count: usize,
#[serde(default = "default_code_language")]
pub code_language: String,
#[serde(default = "default_dictionary_language")]
pub dictionary_language: String,
#[serde(default = "default_passage_book")]
pub passage_book: String,
#[serde(default = "default_passage_downloads_enabled")]
@@ -51,6 +58,9 @@ fn default_word_count() -> usize {
fn default_code_language() -> String {
"rust".to_string()
}
/// Default dictionary language key ("en") used for serde defaults and resets.
fn default_dictionary_language() -> String {
    String::from("en")
}
fn default_passage_book() -> String {
"all".to_string()
}
@@ -97,6 +107,7 @@ impl Default for Config {
keyboard_layout: default_keyboard_layout(),
word_count: default_word_count(),
code_language: default_code_language(),
dictionary_language: default_dictionary_language(),
passage_book: default_passage_book(),
passage_downloads_enabled: default_passage_downloads_enabled(),
passage_download_dir: default_passage_download_dir(),
@@ -149,11 +160,14 @@ impl Config {
self.target_wpm = self.target_wpm.clamp(10, 200);
self.word_count = self.word_count.clamp(5, 100);
self.normalize_code_language(valid_language_keys);
self.normalize_keyboard_layout();
self.normalize_dictionary_language();
self.normalize_language_layout_pair();
}
/// Validate `code_language` against known options, resetting to default if invalid.
/// Call after deserialization to handle stale/renamed keys from old configs.
pub fn normalize_code_language(&mut self, valid_keys: &[&str]) {
fn normalize_code_language(&mut self, valid_keys: &[&str]) {
// Backwards compatibility: old "shell" key is now "bash".
if self.code_language == "shell" {
self.code_language = "bash".to_string();
@@ -162,6 +176,48 @@ impl Config {
self.code_language = default_code_language();
}
}
/// Validate `dictionary_language` against supported keys.
///
/// Unknown keys (e.g. from a stale or hand-edited config) reset to default.
fn normalize_dictionary_language(&mut self) {
    let key = self.dictionary_language.as_str();
    if !supported_dictionary_languages().contains(&key) {
        self.dictionary_language = default_dictionary_language();
    }
}
/// Validate `keyboard_layout` against canonical profile keys.
///
/// Unrecognized layout keys fall back to the default layout.
fn normalize_keyboard_layout(&mut self) {
    let known_layouts = KeyboardModel::supported_layout_keys();
    if !known_layouts.contains(&self.keyboard_layout.as_str()) {
        self.keyboard_layout = default_keyboard_layout();
    }
}
/// Ensure the language/layout combination is explicitly supported.
///
/// Fallback policy: an unknown or support-level-blocked language resets the
/// language; an unknown layout resets the layout; an unsupported pair keeps
/// the layout and switches to a language that layout supports, resetting
/// both only when the layout supports no language at all.
fn normalize_language_layout_pair(&mut self) {
    let err = match self.validate_language_layout_pair() {
        Ok(()) => return,
        Err(err) => err,
    };
    match err {
        LanguageLayoutValidationError::UnknownLanguage(_)
        | LanguageLayoutValidationError::LanguageBlockedBySupportLevel(_) => {
            self.dictionary_language = default_dictionary_language();
        }
        LanguageLayoutValidationError::UnknownLayout(_) => {
            self.keyboard_layout = default_keyboard_layout();
        }
        LanguageLayoutValidationError::UnsupportedLanguageLayoutPair { .. } => {
            match dictionary_languages_for_layout(&self.keyboard_layout).first() {
                Some(language) => {
                    self.dictionary_language = (*language).to_string();
                }
                None => {
                    self.keyboard_layout = default_keyboard_layout();
                    self.dictionary_language = default_dictionary_language();
                }
            }
        }
    }
}
/// Check the configured language/layout pair against the support registry,
/// discarding the success payload and surfacing the typed error on mismatch.
pub fn validate_language_layout_pair(&self) -> Result<(), LanguageLayoutValidationError> {
    match validate_language_layout_pair(&self.dictionary_language, &self.keyboard_layout) {
        Ok(_) => Ok(()),
        Err(err) => Err(err),
    }
}
}
#[cfg(test)]
@@ -175,6 +231,7 @@ mod tests {
assert_eq!(config.code_downloads_enabled, false);
assert_eq!(config.code_snippets_per_repo, 200);
assert_eq!(config.code_onboarding_done, false);
assert_eq!(config.dictionary_language, "en");
assert!(!config.code_download_dir.is_empty());
assert!(config.code_download_dir.contains("code"));
}
@@ -191,6 +248,7 @@ code_language = "go"
assert_eq!(config.target_wpm, 60);
assert_eq!(config.theme, "monokai");
assert_eq!(config.code_language, "go");
assert_eq!(config.dictionary_language, "en");
// New fields should have defaults
assert_eq!(config.code_downloads_enabled, false);
assert_eq!(config.code_snippets_per_repo, 200);
@@ -215,6 +273,7 @@ code_language = "go"
config.code_onboarding_done,
deserialized.code_onboarding_done
);
assert_eq!(config.dictionary_language, deserialized.dictionary_language);
}
#[test]
@@ -252,4 +311,52 @@ code_language = "go"
config.normalize_code_language(&valid_keys);
assert_eq!(config.code_language, "bash");
}
#[test]
fn test_normalize_dictionary_language_invalid_key_resets() {
    // An unsupported language key must fall back to the default.
    let mut config = Config {
        dictionary_language: String::from("zz"),
        ..Config::default()
    };
    config.normalize_dictionary_language();
    assert_eq!(config.dictionary_language, "en");
}
#[test]
fn test_normalize_keyboard_layout_invalid_key_resets() {
    // A layout key absent from the canonical registry resets to the default.
    let mut config = Config {
        keyboard_layout: String::from("foo"),
        ..Config::default()
    };
    config.normalize_keyboard_layout();
    assert_eq!(config.keyboard_layout, "qwerty");
}
#[test]
fn test_normalize_language_layout_pair_resets_invalid_pair() {
    // de + dvorak is an unsupported pair: the layout is kept and the
    // language switches to one the layout supports.
    let mut config = Config {
        dictionary_language: String::from("de"),
        keyboard_layout: String::from("dvorak"),
        ..Config::default()
    };
    config.normalize_language_layout_pair();
    assert_eq!(config.dictionary_language, "en");
    assert_eq!(config.keyboard_layout, "dvorak");
}
#[test]
fn test_validate_language_layout_pair_returns_typed_error() {
    // Validation reports the specific error variant instead of a bare bool.
    let config = Config {
        dictionary_language: String::from("de"),
        keyboard_layout: String::from("dvorak"),
        ..Config::default()
    };
    let outcome = config.validate_language_layout_pair();
    assert!(matches!(
        outcome,
        Err(LanguageLayoutValidationError::UnsupportedLanguageLayoutPair { .. })
    ));
}
#[test]
fn test_normalize_language_layout_pair_unknown_language_resets_language_only() {
    // An unknown language resets the language; the valid layout is untouched.
    let mut config = Config {
        dictionary_language: String::from("zz"),
        keyboard_layout: String::from("qwerty"),
        ..Config::default()
    };
    config.normalize_language_layout_pair();
    assert_eq!(config.dictionary_language, "en");
    assert_eq!(config.keyboard_layout, "qwerty");
}
}

View File

@@ -4,6 +4,9 @@ use serde::{Deserialize, Serialize};
use crate::engine::key_stats::KeyStatsStore;
use crate::keyboard::display::{BACKSPACE, SPACE};
use crate::l10n::language_pack::{
DEFAULT_LATIN_PRIMARY_SEQUENCE, normalized_primary_letter_sequence,
};
/// Events returned by `SkillTree::update` describing what changed.
pub struct SkillTreeUpdate {
@@ -87,6 +90,8 @@ pub struct BranchDefinition {
pub levels: &'static [LevelDefinition],
}
// Lowercase metadata remains for static branch lookup/UI labels. Runtime
// progression and unlock counts are driven by `SkillTree::primary_letters`.
const LOWERCASE_LEVELS: &[LevelDefinition] = &[LevelDefinition {
name: "Frequency Order",
keys: &[
@@ -169,12 +174,12 @@ const CODE_SYMBOLS_LEVELS: &[LevelDefinition] = &[
pub const ALL_BRANCHES: &[BranchDefinition] = &[
BranchDefinition {
id: BranchId::Lowercase,
name: "Lowercase a-z",
name: "Primary Letters",
levels: LOWERCASE_LEVELS,
},
BranchDefinition {
id: BranchId::Capitals,
name: "Capitals A-Z",
name: "Capital Letters",
levels: CAPITALS_LEVELS,
},
BranchDefinition {
@@ -272,13 +277,14 @@ impl Default for SkillTreeProgress {
pub enum DrillScope {
/// Global adaptive: all InProgress + Complete branches
Global,
/// Branch-specific drill: specific branch + a-z background
/// Branch-specific drill: specific branch + primary-letter background
Branch(BranchId),
}
pub struct SkillTree {
pub progress: SkillTreeProgress,
pub total_unique_keys: usize,
primary_letters: Vec<char>,
}
/// Number of lowercase letters to start with before unlocking one-at-a-time
@@ -287,26 +293,49 @@ const ALWAYS_UNLOCKED_KEYS: &[char] = &[SPACE, BACKSPACE];
impl SkillTree {
pub fn new(progress: SkillTreeProgress) -> Self {
let total_unique_keys = Self::compute_total_unique_keys();
Self::new_with_primary_sequence(progress, DEFAULT_LATIN_PRIMARY_SEQUENCE)
}
/// Build a skill tree whose lowercase progression follows `sequence`.
///
/// The sequence is normalized first; an empty normalization result falls
/// back to the default Latin sequence.
pub fn new_with_primary_sequence(progress: SkillTreeProgress, sequence: &str) -> Self {
    let primary_letters = Self::normalize_primary_sequence(sequence);
    Self {
        total_unique_keys: Self::compute_total_unique_keys(&primary_letters),
        progress,
        primary_letters,
    }
}
fn compute_total_unique_keys() -> usize {
/// Normalize a raw primary-letter sequence into a character list, falling
/// back to the default Latin sequence when normalization yields nothing.
fn normalize_primary_sequence(sequence: &str) -> Vec<char> {
    match normalized_primary_letter_sequence(sequence) {
        normalized if normalized.is_empty() => DEFAULT_LATIN_PRIMARY_SEQUENCE.chars().collect(),
        normalized => normalized,
    }
}
/// Count the distinct keys reachable in this tree configuration: every key
/// of every non-lowercase branch, plus the configured primary letters, plus
/// the always-unlocked keys.
fn compute_total_unique_keys(primary_letters: &[char]) -> usize {
    let mut all_keys: HashSet<char> = ALL_BRANCHES
        .iter()
        // The lowercase branch is driven by `primary_letters`, not its
        // static level definition.
        .filter(|branch| branch.id != BranchId::Lowercase)
        .flat_map(|branch| branch.levels.iter())
        .flat_map(|level| level.keys.iter().copied())
        .collect();
    all_keys.extend(primary_letters.iter().copied());
    all_keys.extend(ALWAYS_UNLOCKED_KEYS.iter().copied());
    all_keys.len()
}
/// The configured primary-letter sequence driving lowercase progression.
pub fn primary_letters(&self) -> &[char] {
    self.primary_letters.as_slice()
}
pub fn branch_status(&self, id: BranchId) -> &BranchStatus {
self.progress
.branches
@@ -366,10 +395,14 @@ impl SkillTree {
}
}
BranchStatus::Complete => {
if branch_def.id == BranchId::Lowercase {
keys.extend(self.primary_letters.iter().copied());
} else {
for level in branch_def.levels {
keys.extend_from_slice(level.keys);
}
}
}
_ => {}
}
}
@@ -379,16 +412,13 @@ impl SkillTree {
fn branch_unlocked_keys(&self, id: BranchId) -> Vec<char> {
let mut keys = ALWAYS_UNLOCKED_KEYS.to_vec();
// Always include a-z background keys
// Always include primary-letter background keys
if id != BranchId::Lowercase {
let lowercase_def = get_branch_definition(BranchId::Lowercase);
let lowercase_bp = self.branch_progress(BranchId::Lowercase);
match lowercase_bp.status {
BranchStatus::InProgress => keys.extend(self.lowercase_unlocked_keys()),
BranchStatus::Complete => {
for level in lowercase_def.levels {
keys.extend_from_slice(level.keys);
}
keys.extend(self.primary_letters.iter().copied());
}
_ => {}
}
@@ -422,9 +452,8 @@ impl SkillTree {
/// Get the progressively-unlocked lowercase keys (mirrors old LetterUnlock logic).
fn lowercase_unlocked_keys(&self) -> Vec<char> {
let def = get_branch_definition(BranchId::Lowercase);
let bp = self.branch_progress(BranchId::Lowercase);
let all_keys = def.levels[0].keys;
let all_keys = self.primary_letters();
match bp.status {
BranchStatus::Complete => all_keys.to_vec(),
@@ -470,10 +499,14 @@ impl SkillTree {
}
}
BranchStatus::Complete => {
if branch_def.id == BranchId::Lowercase {
focus_candidates.extend(self.primary_letters.iter().copied());
} else {
for level in branch_def.levels {
focus_candidates.extend_from_slice(level.keys);
}
}
}
_ => {}
}
}
@@ -645,11 +678,11 @@ impl SkillTree {
return;
}
let all_keys = get_branch_definition(BranchId::Lowercase).levels[0].keys;
let all_keys = self.primary_letters.clone();
let current_count = LOWERCASE_MIN_KEYS + bp.current_level;
if current_count >= all_keys.len() {
// All 26 keys unlocked, check if all confident
// All primary letters unlocked, check if all confident
let all_confident = all_keys.iter().all(|&ch| stats.get_confidence(ch) >= 1.0);
if all_confident {
let bp_mut = self.branch_progress_mut(BranchId::Lowercase);
@@ -718,12 +751,18 @@ impl SkillTree {
}
}
BranchStatus::Complete => {
if branch_def.id == BranchId::Lowercase {
for &key in self.primary_letters() {
keys.insert(key);
}
} else {
for level in branch_def.levels {
for &key in level.keys {
keys.insert(key);
}
}
}
}
_ => {}
}
}
@@ -749,7 +788,13 @@ impl SkillTree {
let def = get_branch_definition(id);
let bp = self.branch_progress(id);
match bp.status {
BranchStatus::Complete => def.levels.iter().map(|l| l.keys.len()).sum(),
BranchStatus::Complete => {
if id == BranchId::Lowercase {
self.primary_letters().len()
} else {
def.levels.iter().map(|l| l.keys.len()).sum()
}
}
BranchStatus::InProgress => {
if id == BranchId::Lowercase {
self.lowercase_unlocked_count()
@@ -772,6 +817,15 @@ impl SkillTree {
def.levels.iter().map(|l| l.keys.len()).sum()
}
/// Total keys defined in a branch for this tree configuration.
///
/// Unlike the static `branch_total_keys`, the lowercase branch is sized by
/// the configured primary-letter sequence rather than a fixed alphabet.
pub fn branch_total_keys_for(&self, id: BranchId) -> usize {
    match id {
        BranchId::Lowercase => self.primary_letters().len(),
        other => Self::branch_total_keys(other),
    }
}
/// Count of unique confident keys across all branches.
pub fn total_confident_keys(&self, stats: &KeyStatsStore) -> usize {
let mut keys: HashSet<char> = HashSet::new();
@@ -780,7 +834,15 @@ impl SkillTree {
keys.insert(ch);
}
}
for &ch in self.primary_letters() {
if stats.get_confidence(ch) >= 1.0 {
keys.insert(ch);
}
}
for branch_def in ALL_BRANCHES {
if branch_def.id == BranchId::Lowercase {
continue;
}
for level in branch_def.levels {
for &ch in level.keys {
if stats.get_confidence(ch) >= 1.0 {
@@ -794,6 +856,12 @@ impl SkillTree {
/// Count of confident keys in a branch.
pub fn branch_confident_keys(&self, id: BranchId, stats: &KeyStatsStore) -> usize {
if id == BranchId::Lowercase {
self.primary_letters()
.iter()
.filter(|&&ch| stats.get_confidence(ch) >= 1.0)
.count()
} else {
let def = get_branch_definition(id);
def.levels
.iter()
@@ -801,6 +869,7 @@ impl SkillTree {
.filter(|&&ch| stats.get_confidence(ch) >= 1.0)
.count()
}
}
}
impl Default for SkillTree {
@@ -812,6 +881,7 @@ impl Default for SkillTree {
#[cfg(test)]
mod tests {
use super::*;
use crate::l10n::language_pack::language_packs;
fn make_stats_confident(stats: &mut KeyStatsStore, keys: &[char]) {
for &ch in keys {
@@ -851,6 +921,21 @@ mod tests {
assert!(keys.contains(&BACKSPACE));
}
#[test]
fn test_custom_primary_sequence_drives_lowercase_progression() {
    let tree = SkillTree::new_with_primary_sequence(SkillTreeProgress::default(), "abcde");
    let unlocked = tree.unlocked_keys(DrillScope::Global);
    // A five-letter primary sequence unlocks every primary letter right away.
    assert!(unlocked.contains(&'a'));
    assert!(unlocked.contains(&'e'));
    assert_eq!(tree.primary_letters(), &['a', 'b', 'c', 'd', 'e']);
    assert_eq!(
        tree.branch_total_keys_for(BranchId::Lowercase),
        tree.primary_letters().len()
    );
}
#[test]
fn test_lowercase_progressive_unlock() {
let mut tree = SkillTree::default();
@@ -871,9 +956,9 @@ mod tests {
let mut tree = SkillTree::default();
let mut stats = KeyStatsStore::default();
// Make all 26 lowercase keys confident
let all_lowercase = get_branch_definition(BranchId::Lowercase).levels[0].keys;
make_stats_confident(&mut stats, all_lowercase);
// Make all primary letters confident.
let all_primary = tree.primary_letters().to_vec();
make_stats_confident(&mut stats, &all_primary);
// Need to repeatedly update as each unlock requires all current keys confident
for _ in 0..30 {
@@ -1041,8 +1126,8 @@ mod tests {
bp.current_level = 1;
let keys = tree.unlocked_keys(DrillScope::Branch(BranchId::Capitals));
// Should include all 26 lowercase + Capitals L1 (8) + Capitals L2 (10)
assert!(keys.contains(&'e')); // lowercase background
// Should include full primary-letter background + Capitals L1 (8) + Capitals L2 (10)
assert!(keys.contains(&tree.primary_letters()[0])); // primary-letter background
assert!(keys.contains(&'T')); // Capitals L1
assert!(keys.contains(&'J')); // Capitals L2 (current level)
assert!(!keys.contains(&'O')); // Capitals L3 (locked)
@@ -1096,6 +1181,73 @@ mod tests {
assert!(branches.len() - 1 < branches.len());
}
#[test]
fn progression_is_monotonic_for_all_language_pack_sequences() {
    // Property test: for every registered language pack, the number of
    // unlocked lowercase (primary) keys never decreases as keys become
    // confident, and the lowercase branch eventually completes.
    for pack in language_packs() {
        let mut tree = SkillTree::new_with_primary_sequence(
            SkillTreeProgress::default(),
            pack.primary_letter_sequence,
        );
        let primary = tree.primary_letters().to_vec();
        assert!(
            !primary.is_empty(),
            "primary sequence should be non-empty for {}",
            pack.language_key
        );
        let mut stats = KeyStatsStore::default();
        let mut previous_count = tree.lowercase_unlocked_count();
        assert!(
            previous_count <= primary.len(),
            "initial unlock count must be bounded for {}",
            pack.language_key
        );
        // Master keys in configured sequence order and verify unlocked count never decreases.
        for &ch in &primary {
            make_stats_confident(&mut stats, &[ch]);
            // Several updates per key: each unlock requires all currently
            // unlocked keys to be confident first.
            for _ in 0..3 {
                tree.update(&stats, None);
                let current_count = tree.lowercase_unlocked_count();
                assert!(
                    current_count >= previous_count,
                    "unlock count regressed for {}: {} -> {}",
                    pack.language_key,
                    previous_count,
                    current_count
                );
                previous_count = current_count;
            }
        }
        // Completion pass: keep updating until progression settles.
        for _ in 0..30 {
            tree.update(&stats, None);
            let current_count = tree.lowercase_unlocked_count();
            assert!(
                current_count >= previous_count,
                "unlock count regressed in completion pass for {}: {} -> {}",
                pack.language_key,
                previous_count,
                current_count
            );
            previous_count = current_count;
        }
        // End state: every primary letter unlocked, branch marked Complete.
        assert_eq!(
            tree.lowercase_unlocked_count(),
            primary.len(),
            "all primary letters should unlock for {}",
            pack.language_key
        );
        assert_eq!(
            *tree.branch_status(BranchId::Lowercase),
            BranchStatus::Complete,
            "lowercase branch should complete for {}",
            pack.language_key
        );
    }
}
#[test]
fn test_update_returns_newly_unlocked() {
let mut tree = SkillTree::default();
@@ -1166,8 +1318,8 @@ mod tests {
let mut tree = SkillTree::default();
let mut stats = KeyStatsStore::default();
let all_lowercase = get_branch_definition(BranchId::Lowercase).levels[0].keys;
make_stats_confident(&mut stats, all_lowercase);
let all_primary = tree.primary_letters().to_vec();
make_stats_confident(&mut stats, &all_primary);
// Run updates to advance through progressive unlock
let mut found_available = false;
@@ -1262,8 +1414,8 @@ mod tests {
// Set all branches to InProgress at last level with all keys confident
// First complete lowercase
let all_lowercase = get_branch_definition(BranchId::Lowercase).levels[0].keys;
make_stats_confident(&mut stats, all_lowercase);
let all_primary = tree.primary_letters().to_vec();
make_stats_confident(&mut stats, &all_primary);
for _ in 0..30 {
tree.update(&stats, None);
}
@@ -1359,8 +1511,8 @@ mod tests {
let mut tree = SkillTree::default();
let mut stats = KeyStatsStore::default();
let all_lowercase = get_branch_definition(BranchId::Lowercase).levels[0].keys;
make_stats_confident(&mut stats, all_lowercase);
let all_primary = tree.primary_letters().to_vec();
make_stats_confident(&mut stats, &all_primary);
for _ in 0..30 {
let result = tree.update(&stats, None);
@@ -1382,8 +1534,8 @@ mod tests {
let mut tree = SkillTree::default();
let mut stats = KeyStatsStore::default();
let all_lowercase = get_branch_definition(BranchId::Lowercase).levels[0].keys;
make_stats_confident(&mut stats, all_lowercase);
let all_primary = tree.primary_letters().to_vec();
make_stats_confident(&mut stats, &all_primary);
for _ in 0..30 {
let result = tree.update(&stats, None);

View File

@@ -1,6 +1,10 @@
use rand::Rng;
use rand::rngs::SmallRng;
/// Case-insensitive comparison of two characters using their full Unicode
/// lowercase expansions (a single char may lowercase to several chars).
fn lowercase_eq(a: char, b: char) -> bool {
    let mut left = a.to_lowercase();
    let mut right = b.to_lowercase();
    loop {
        match (left.next(), right.next()) {
            (None, None) => break true,
            (Some(x), Some(y)) if x == y => {}
            _ => break false,
        }
    }
}
/// Post-processing pass that capitalizes words in generated text.
/// Only capitalizes using letters from `unlocked_capitals`.
pub fn apply_capitalization(
@@ -13,7 +17,7 @@ pub fn apply_capitalization(
return text.to_string();
}
let focused_upper = focused.filter(|ch| ch.is_ascii_uppercase());
let focused_upper = focused.filter(|ch| ch.is_uppercase());
let mut words: Vec<String> = text.split_whitespace().map(|w| w.to_string()).collect();
if words.is_empty() {
return text.to_string();
@@ -72,7 +76,7 @@ pub fn apply_capitalization(
if let Some(focused_upper) = focused_upper.filter(|ch| unlocked_capitals.contains(ch)) {
let alpha_words = words
.iter()
.filter(|w| w.chars().any(|ch| ch.is_ascii_alphabetic()))
.filter(|w| w.chars().any(|ch| ch.is_alphabetic()))
.count();
let min_focused = alpha_words.min(4);
ensure_min_focused_occurrences(&mut words, focused_upper, min_focused);
@@ -88,20 +92,20 @@ pub fn apply_capitalization(
fn word_start_upper(word: &str) -> Option<char> {
word.chars()
.find(|ch| ch.is_ascii_alphabetic())
.map(|ch| ch.to_ascii_uppercase())
.find(|ch| ch.is_alphabetic())
.and_then(|ch| ch.to_uppercase().next())
}
fn capitalize_word_start(word: &mut String) -> Option<char> {
let mut chars: Vec<char> = word.chars().collect();
for i in 0..chars.len() {
if chars[i].is_ascii_lowercase() {
chars[i] = chars[i].to_ascii_uppercase();
if chars[i].is_lowercase() {
chars[i] = chars[i].to_uppercase().next().unwrap_or(chars[i]);
let upper = chars[i];
*word = chars.into_iter().collect();
return Some(upper);
}
if chars[i].is_ascii_uppercase() {
if chars[i].is_uppercase() {
return Some(chars[i]);
}
}
@@ -111,20 +115,20 @@ fn capitalize_word_start(word: &mut String) -> Option<char> {
fn ends_sentence(word: &str) -> bool {
word.chars()
.rev()
.find(|ch| !ch.is_ascii_whitespace())
.find(|ch| !ch.is_whitespace())
.is_some_and(|ch| matches!(ch, '.' | '?' | '!'))
}
fn word_starts_with_lower(word: &str, lower: char) -> bool {
word.chars()
.find(|ch| ch.is_ascii_alphabetic())
.is_some_and(|ch| ch == lower)
.find(|ch| ch.is_alphabetic())
.is_some_and(|ch| lowercase_eq(ch, lower))
}
fn force_word_start_to_upper(word: &mut String, upper: char) -> bool {
let mut chars: Vec<char> = word.chars().collect();
for i in 0..chars.len() {
if chars[i].is_ascii_alphabetic() {
if chars[i].is_alphabetic() {
if chars[i] == upper {
return false;
}
@@ -137,7 +141,7 @@ fn force_word_start_to_upper(word: &mut String, upper: char) -> bool {
}
fn ensure_min_focused_occurrences(words: &mut Vec<String>, focused_upper: char, min_count: usize) {
let focused_lower = focused_upper.to_ascii_lowercase();
let focused_lower = focused_upper.to_lowercase().next().unwrap_or(focused_upper);
let mut count = words
.iter()
.map(|w| w.chars().filter(|&ch| ch == focused_upper).count())
@@ -173,8 +177,8 @@ fn ensure_min_focused_occurrences(words: &mut Vec<String>, focused_upper: char,
}
let next_starts_focused = words[i + 1]
.chars()
.find(|ch| ch.is_ascii_alphabetic())
.is_some_and(|ch| ch.eq_ignore_ascii_case(&focused_lower));
.find(|ch| ch.is_alphabetic())
.is_some_and(|ch| lowercase_eq(ch, focused_lower));
if next_starts_focused {
capitalize_word_start(&mut words[i + 1]);
let next = words.remove(i + 1);
@@ -204,7 +208,7 @@ fn ensure_min_total_capitals(
) {
let mut count = words
.iter()
.map(|w| w.chars().filter(|ch| ch.is_ascii_uppercase()).count())
.map(|w| w.chars().filter(|ch| ch.is_uppercase()).count())
.sum::<usize>();
if count >= min_count || unlocked_capitals.is_empty() {
return;
@@ -219,7 +223,7 @@ fn ensure_min_total_capitals(
continue;
};
if unlocked_capitals.contains(&upper)
&& word_starts_with_lower(word, upper.to_ascii_lowercase())
&& word_starts_with_lower(word, upper.to_lowercase().next().unwrap_or(upper))
{
if capitalize_word_start(word) == Some(upper) {
count += 1;

View File

@@ -1,26 +1,87 @@
use crate::engine::filter::CharFilter;
use crate::l10n::unicode::normalize_nfc;
const WORDS_EN: &str = include_str!("../../assets/words-en.json");
const WORDS_CS: &str = include_str!("../../assets/dictionaries/words-cs.json");
const WORDS_DA: &str = include_str!("../../assets/dictionaries/words-da.json");
const WORDS_DE: &str = include_str!("../../assets/dictionaries/words-de.json");
const WORDS_EN: &str = include_str!("../../assets/dictionaries/words-en.json");
const WORDS_ES: &str = include_str!("../../assets/dictionaries/words-es.json");
const WORDS_ET: &str = include_str!("../../assets/dictionaries/words-et.json");
const WORDS_FI: &str = include_str!("../../assets/dictionaries/words-fi.json");
const WORDS_FR: &str = include_str!("../../assets/dictionaries/words-fr.json");
const WORDS_HR: &str = include_str!("../../assets/dictionaries/words-hr.json");
const WORDS_HU: &str = include_str!("../../assets/dictionaries/words-hu.json");
const WORDS_IT: &str = include_str!("../../assets/dictionaries/words-it.json");
const WORDS_LT: &str = include_str!("../../assets/dictionaries/words-lt.json");
const WORDS_LV: &str = include_str!("../../assets/dictionaries/words-lv.json");
const WORDS_NB: &str = include_str!("../../assets/dictionaries/words-nb.json");
const WORDS_NL: &str = include_str!("../../assets/dictionaries/words-nl.json");
const WORDS_PL: &str = include_str!("../../assets/dictionaries/words-pl.json");
const WORDS_PT: &str = include_str!("../../assets/dictionaries/words-pt.json");
const WORDS_RO: &str = include_str!("../../assets/dictionaries/words-ro.json");
const WORDS_SL: &str = include_str!("../../assets/dictionaries/words-sl.json");
const WORDS_SV: &str = include_str!("../../assets/dictionaries/words-sv.json");
const WORDS_TR: &str = include_str!("../../assets/dictionaries/words-tr.json");
#[derive(Clone, Debug)]
pub struct Dictionary {
words: Vec<String>,
}
impl Dictionary {
pub fn load() -> Self {
let words: Vec<String> = serde_json::from_str(WORDS_EN).unwrap_or_default();
// Filter to words of length >= 3 (matching keybr)
let words = words
.into_iter()
.filter(|w| w.len() >= 3 && w.chars().all(|c| c.is_ascii_lowercase()))
.collect();
Self { words }
/// Maps a language key (e.g. "en") to its embedded dictionary JSON, or
/// `None` when no asset is bundled for that key.
fn raw_for_language(language_key: &str) -> Option<&'static str> {
    const EMBEDDED: &[(&str, &str)] = &[
        ("cs", WORDS_CS),
        ("da", WORDS_DA),
        ("de", WORDS_DE),
        ("en", WORDS_EN),
        ("es", WORDS_ES),
        ("et", WORDS_ET),
        ("fi", WORDS_FI),
        ("fr", WORDS_FR),
        ("hr", WORDS_HR),
        ("hu", WORDS_HU),
        ("it", WORDS_IT),
        ("lt", WORDS_LT),
        ("lv", WORDS_LV),
        ("nb", WORDS_NB),
        ("nl", WORDS_NL),
        ("pl", WORDS_PL),
        ("pt", WORDS_PT),
        ("ro", WORDS_RO),
        ("sl", WORDS_SL),
        ("sv", WORDS_SV),
        ("tr", WORDS_TR),
    ];
    EMBEDDED
        .iter()
        .find(|(key, _)| *key == language_key)
        .map(|&(_, raw)| raw)
}
pub fn words_list(&self) -> Vec<String> {
self.words.clone()
/// True when an embedded dictionary asset exists for `language_key`.
pub fn supports_language(language_key: &str) -> bool {
    Self::raw_for_language(language_key).is_some()
}
/// Loads the embedded dictionary for `language_key`, or `None` when no
/// asset is bundled for that key.
///
/// Words are NFC-normalized so composed/decomposed Unicode forms compare
/// equal, then filtered to entries of at least three characters that
/// contain no internal whitespace. A malformed JSON asset yields an empty
/// word list rather than an error.
pub fn try_load_for_language(language_key: &str) -> Option<Self> {
    let raw = Self::raw_for_language(language_key)?;
    let parsed: Vec<String> = serde_json::from_str(raw).unwrap_or_default();
    let mut words = Vec::with_capacity(parsed.len());
    for word in parsed {
        let normalized = normalize_nfc(&word);
        let long_enough = normalized.chars().count() >= 3;
        let has_whitespace = normalized.chars().any(char::is_whitespace);
        if long_enough && !has_whitespace {
            words.push(normalized);
        }
    }
    Some(Self { words })
}
/// Like [`Self::try_load_for_language`], but panics on an unknown key.
///
/// # Panics
/// Panics when no embedded dictionary exists for `language_key`.
pub fn load_for_language(language_key: &str) -> Self {
    match Self::try_load_for_language(language_key) {
        Some(dictionary) => dictionary,
        None => panic!("unsupported dictionary language: {language_key}"),
    }
}
/// Borrowed view of the filtered, NFC-normalized word list.
pub fn words_list(&self) -> &[String] {
    self.words.as_slice()
}
pub fn find_matching(&self, filter: &CharFilter, focused: Option<char>) -> Vec<&str> {
@@ -43,10 +104,17 @@ impl Dictionary {
#[cfg(test)]
mod tests {
use super::*;
use crate::l10n::language_pack::{language_packs, supported_dictionary_languages};
#[test]
#[should_panic(expected = "unsupported dictionary language")]
fn load_for_language_unknown_panics() {
    // "zz" is not a registered language key, so loading must panic.
    Dictionary::load_for_language("zz");
}
#[test]
fn find_matching_focused_is_sort_only() {
let dictionary = Dictionary::load();
let dictionary = Dictionary::load_for_language("en");
let filter = CharFilter::new(('a'..='z').collect());
let without_focus = dictionary.find_matching(&filter, None);
@@ -61,4 +129,34 @@ mod tests {
assert_eq!(sorted_without, sorted_with);
assert_eq!(without_focus.len(), with_focus.len());
}
#[test]
fn non_english_dictionaries_load_substantial_word_lists() {
    // Every supported non-English language must ship a real word list,
    // not an empty or token asset.
    let non_english = supported_dictionary_languages()
        .iter()
        .copied()
        .filter(|&lang| lang != "en");
    for lang in non_english {
        let dictionary = Dictionary::load_for_language(lang);
        let count = dictionary.words_list().len();
        assert!(
            count > 100,
            "expected substantial dictionary for language {lang}"
        );
    }
}
#[test]
fn all_registered_language_packs_have_embedded_dictionary_assets() {
    // Registry and embedded assets must stay in lockstep: every pack needs
    // a dictionary that both exists and parses into a usable word list.
    for pack in language_packs() {
        let key = pack.language_key;
        assert!(
            Dictionary::supports_language(key),
            "language pack {} is missing an embedded dictionary asset",
            key
        );
        assert!(
            Dictionary::try_load_for_language(key).is_some(),
            "dictionary load failed for language pack {}",
            key
        );
    }
}
}

View File

@@ -75,15 +75,60 @@ impl PhoneticGenerator {
filter: &CharFilter,
focused_char: Option<char>,
focused_bigram: Option<[char; 2]>,
starters: &[(char, f64)],
) -> String {
for _attempt in 0..5 {
let word = self.try_generate_word(filter, focused_char, focused_bigram);
if word.len() >= MIN_WORD_LEN {
let word = self.try_generate_word(filter, focused_char, focused_bigram, starters);
if word.chars().count() >= MIN_WORD_LEN {
return word;
}
}
// Fallback
"the".to_string()
self.default_fallback_word(filter)
}
/// Deterministic last-resort word when generation fails repeatedly.
///
/// Preference order: the first dictionary word matching `filter`, then the
/// three lexicographically smallest allowed non-whitespace characters,
/// then a single seed character repeated to the minimum word length
/// ('x' only when the filter allows nothing but whitespace).
fn default_fallback_word(&self, filter: &CharFilter) -> String {
    let matching = self.dictionary.find_matching(filter, None);
    if let Some(first) = matching.first() {
        return (*first).to_string();
    }
    let mut candidates: Vec<char> = filter
        .allowed
        .iter()
        .copied()
        .filter(|ch| !ch.is_whitespace())
        .collect();
    candidates.sort_unstable();
    // NOTE(review): take(3) assumes MIN_WORD_LEN <= 3 — smaller alphabets
    // fall through to the repeated-seed path below.
    let fallback: String = candidates.iter().take(3).collect();
    if fallback.chars().count() >= MIN_WORD_LEN {
        fallback
    } else {
        let seed = match fallback.chars().next() {
            Some(ch) => ch,
            None => candidates.first().copied().unwrap_or('x'),
        };
        std::iter::repeat_n(seed, MIN_WORD_LEN).collect()
    }
}
fn starter_weights(&self, filter: &CharFilter) -> Vec<(char, f64)> {
let mut weights = std::collections::HashMap::<char, f64>::new();
for word in self.dictionary.words_list() {
if let Some(first) = word.chars().next()
&& filter.is_allowed(first)
{
*weights.entry(first).or_insert(0.0) += 1.0;
}
}
if weights.is_empty() {
return filter
.allowed
.iter()
.copied()
.filter(|c| !c.is_whitespace())
.map(|c| (c, 1.0))
.collect();
}
weights.into_iter().collect()
}
fn try_generate_word(
@@ -91,6 +136,7 @@ impl PhoneticGenerator {
filter: &CharFilter,
focused: Option<char>,
focused_bigram: Option<[char; 2]>,
starters: &[(char, f64)],
) -> String {
let mut word = Vec::new();
@@ -149,22 +195,10 @@ impl PhoneticGenerator {
}
// Fallback: weighted random start
if word.is_empty() {
let starters: Vec<(char, f64)> = filter
.allowed
.iter()
.map(|&ch| {
let w = match ch {
'e' | 't' | 'a' => 3.0,
'o' | 'i' | 'n' | 's' => 2.0,
_ => 1.0,
};
(ch, w)
})
.collect();
if let Some(ch) = Self::pick_weighted_from(&mut self.rng, &starters, filter) {
word.push(ch);
} else {
return "the".to_string();
return self.default_fallback_word(filter);
}
}
}
@@ -224,14 +258,16 @@ impl PhoneticGenerator {
break;
}
} else {
// Fallback to vowel
let vowels: Vec<(char, f64)> = ['a', 'e', 'i', 'o', 'u']
// Fallback to any allowed alphabetic character.
let next_chars: Vec<(char, f64)> = filter
.allowed
.iter()
.filter(|&&v| filter.is_allowed(v))
.map(|&v| (v, 1.0))
.copied()
.filter(|ch| ch.is_alphabetic())
.map(|ch| (ch, 1.0))
.collect();
if let Some(v) = Self::pick_weighted_from(&mut self.rng, &vowels, filter) {
word.push(v);
if let Some(next) = Self::pick_weighted_from(&mut self.rng, &next_chars, filter) {
word.push(next);
} else {
break;
}
@@ -357,6 +393,7 @@ impl TextGenerator for PhoneticGenerator {
.iter()
.map(|s| s.to_string())
.collect();
let starters = self.starter_weights(filter);
let pool_size = matching_words.len();
let use_dict = pool_size >= MIN_REAL_WORDS;
@@ -392,7 +429,7 @@ impl TextGenerator for PhoneticGenerator {
// Pre-categorize words into tiers for dictionary picks
let bigram_str = focused_bigram.map(|b| format!("{}{}", b[0], b[1]));
let focus_char_lower = focused_char.filter(|ch| ch.is_ascii_lowercase());
let focus_char_lower = focused_char.filter(|ch| ch.is_lowercase());
let (bigram_indices, char_indices, other_indices) = if use_dict {
let mut bi = Vec::new();
@@ -436,7 +473,8 @@ impl TextGenerator for PhoneticGenerator {
}
words.push(word);
} else {
let word = self.generate_phonetic_word(filter, focused_char, focused_bigram);
let word =
self.generate_phonetic_word(filter, focused_char, focused_bigram, &starters);
recent.push(word.clone());
if recent.len() > dedup_window {
recent.remove(0);
@@ -456,13 +494,13 @@ mod tests {
#[test]
fn focused_key_biases_real_word_sampling() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
let filter = CharFilter::new(('a'..='z').collect());
let mut focused_gen = PhoneticGenerator::new(
table.clone(),
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -474,7 +512,7 @@ mod tests {
let mut baseline_gen = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -492,13 +530,13 @@ mod tests {
#[test]
fn test_phonetic_bigram_focus_increases_bigram_words() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
let filter = CharFilter::new(('a'..='z').collect());
let mut bigram_gen = PhoneticGenerator::new(
table.clone(),
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -510,7 +548,7 @@ mod tests {
let mut baseline_gen = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -528,13 +566,13 @@ mod tests {
#[test]
fn test_phonetic_dual_focus_no_excessive_repeats() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
let filter = CharFilter::new(('a'..='z').collect());
let mut generator = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -561,8 +599,8 @@ mod tests {
#[test]
fn cross_drill_history_suppresses_repeats() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
// Use a filter yielding a pool above FULL_DICT_THRESHOLD so dict_ratio=1.0
// (all words are dictionary picks, maximizing history suppression signal).
// Focus on 'k' to constrain the effective tier pool further.
@@ -575,7 +613,7 @@ mod tests {
// Drill 1: generate words and collect the set
let mut gen1 = PhoneticGenerator::new(
table.clone(),
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(100),
HashSet::new(),
);
@@ -585,7 +623,7 @@ mod tests {
// Drill 2 without history (baseline)
let mut gen2_no_hist = PhoneticGenerator::new(
table.clone(),
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(200),
HashSet::new(),
);
@@ -601,7 +639,7 @@ mod tests {
// Drill 2 with history from drill 1
let mut gen2_with_hist = PhoneticGenerator::new(
table.clone(),
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(200),
words1.clone(),
);
@@ -626,8 +664,8 @@ mod tests {
#[test]
fn hybrid_mode_produces_mixed_output() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
// Use a constrained filter to get a pool in the hybrid range (8-60).
let allowed: Vec<char> = "abcdef ".chars().collect();
let filter = CharFilter::new(allowed);
@@ -647,7 +685,7 @@ mod tests {
let mut generator = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -676,8 +714,8 @@ mod tests {
#[test]
fn boundary_phonetic_only_below_threshold() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
// Very small filter — should yield < MIN_REAL_WORDS (8) dictionary matches.
// With pool < MIN_REAL_WORDS, use_dict=false so 0% intentional dictionary
// selections (the code never enters pick_tiered_word).
@@ -697,7 +735,7 @@ mod tests {
let mut generator = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -720,8 +758,8 @@ mod tests {
#[test]
fn boundary_full_dict_above_threshold() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
// Full alphabet — should yield 100+ dictionary matches
let filter = CharFilter::new(('a'..='z').collect());
@@ -741,7 +779,7 @@ mod tests {
// All picks come from matching_words → 100% dictionary.
let mut generator = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
HashSet::new(),
);
@@ -759,8 +797,8 @@ mod tests {
#[test]
fn weighted_suppression_graceful_degradation() {
let dictionary = Dictionary::load();
let table = TransitionTable::build_from_words(&dictionary.words_list());
let dictionary = Dictionary::load_for_language("en");
let table = TransitionTable::build_from_words(dictionary.words_list());
// Use a small filter to get a small pool
let allowed: Vec<char> = "abcdefghijk ".chars().collect();
let filter = CharFilter::new(allowed);
@@ -780,7 +818,7 @@ mod tests {
let mut generator = PhoneticGenerator::new(
table,
Dictionary::load(),
Dictionary::load_for_language("en"),
SmallRng::seed_from_u64(42),
history.clone(),
);

View File

@@ -49,10 +49,7 @@ impl TransitionTable {
let prefix_len = 3; // order - 1
for (rank, word) in words.iter().enumerate() {
if word.len() < 3 {
continue;
}
if !word.chars().all(|c| c.is_ascii_lowercase()) {
if word.chars().count() < 3 {
continue;
}
@@ -238,3 +235,48 @@ impl Default for TransitionTable {
Self::new(4)
}
}
#[cfg(test)]
mod tests {
    use super::TransitionTable;

    #[test]
    fn build_from_words_supports_multibyte_utf8_words() {
        // Words whose first character is outside ASCII must still register
        // start transitions (the build no longer filters to ASCII lowercase).
        let words: Vec<String> = ["árvore", "über", "mañana", "český"]
            .iter()
            .map(|w| w.to_string())
            .collect();
        let table = TransitionTable::build_from_words(&words);
        let start_prefix = vec![' ', ' ', ' '];
        let segment = table
            .segment(&start_prefix)
            .expect("expected start transitions");
        let has_utf8_start = segment
            .iter()
            .any(|(ch, _)| matches!(*ch, 'á' | 'ü' | 'm' | 'č'));
        assert!(
            has_utf8_start,
            "expected UTF-8 word starts in transition table"
        );
    }

    #[test]
    fn segment_backoff_works_with_unicode_prefixes() {
        let mut table = TransitionTable::new(4);
        table.add(&['ü'], 'b', 1.0);
        // Prefix length is intentionally longer than order-1; `segment`
        // should back off to the shorter 'ü' prefix.
        let query_prefix = vec!['x', 'x', 'ü'];
        let segment = table
            .segment(&query_prefix)
            .expect("expected backoff match for unicode prefix");
        let continues_to_b = segment.iter().any(|(ch, _)| *ch == 'b');
        assert!(continues_to_b, "expected continuation for 'ü' prefix");
    }
}

View File

@@ -1,51 +0,0 @@
use serde::{Deserialize, Serialize};
#[allow(dead_code)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct KeyboardLayout {
pub name: String,
pub rows: Vec<Vec<char>>,
}
impl KeyboardLayout {
pub fn qwerty() -> Self {
Self {
name: "QWERTY".to_string(),
rows: vec![
vec!['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'],
vec!['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'],
vec!['z', 'x', 'c', 'v', 'b', 'n', 'm'],
],
}
}
#[allow(dead_code)]
pub fn dvorak() -> Self {
Self {
name: "Dvorak".to_string(),
rows: vec![
vec!['\'', ',', '.', 'p', 'y', 'f', 'g', 'c', 'r', 'l'],
vec!['a', 'o', 'e', 'u', 'i', 'd', 'h', 't', 'n', 's'],
vec![';', 'q', 'j', 'k', 'x', 'b', 'm', 'w', 'v', 'z'],
],
}
}
#[allow(dead_code)]
pub fn colemak() -> Self {
Self {
name: "Colemak".to_string(),
rows: vec![
vec!['q', 'w', 'f', 'p', 'g', 'j', 'l', 'u', 'y'],
vec!['a', 'r', 's', 't', 'd', 'h', 'n', 'e', 'i', 'o'],
vec!['z', 'x', 'c', 'v', 'b', 'k', 'm'],
],
}
}
}
impl Default for KeyboardLayout {
fn default() -> Self {
Self::qwerty()
}
}

View File

@@ -1,4 +1,3 @@
pub mod display;
pub mod finger;
pub mod layout;
pub mod model;

File diff suppressed because it is too large Load Diff

608
src/l10n/language_pack.rs Normal file
View File

@@ -0,0 +1,608 @@
#![allow(dead_code)] // TODO(phase 1+): remove when all language-pack fields are consumed by runtime/UI.
use std::fmt;
use std::sync::OnceLock;
use crate::keyboard::model::KeyboardModel;
/// Writing system of a language pack. Only Latin-script languages are
/// registered in the current seed registry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Script {
    Latin,
}
/// Runtime availability gate for a language pack. `Blocked` packs fail
/// validation with `LanguageBlockedBySupportLevel`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SupportLevel {
    Full,
    Blocked,
}
/// Whether a language/layout combination is selectable.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CapabilityState {
    Enabled,
    // Reserved for selector UIs that show but disable unsupported entries.
    // Validation APIs still return typed errors for disabled combinations.
    Disabled,
}
/// Typed failures from language/keyboard-layout pair validation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum LanguageLayoutValidationError {
    UnknownLanguage(String),
    UnknownLayout(String),
    UnsupportedLanguageLayoutPair {
        language_key: String,
        layout_key: String,
    },
    LanguageBlockedBySupportLevel(String),
}

impl fmt::Display for LanguageLayoutValidationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render each variant to a short one-line message.
        let message = match self {
            Self::UnknownLanguage(key) => format!("Unknown language: {key}"),
            Self::UnknownLayout(key) => format!("Unknown keyboard layout: {key}"),
            Self::UnsupportedLanguageLayoutPair {
                language_key,
                layout_key,
            } => format!("Unsupported language/layout pair: {language_key} + {layout_key}"),
            Self::LanguageBlockedBySupportLevel(key) => {
                format!("Language is blocked by support level: {key}")
            }
        };
        f.write_str(&message)
    }
}
/// Failures when checking whether ranked/adaptive mode can start for a
/// given language + layout combination.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RankedReadinessError {
    InvalidLanguageLayout(LanguageLayoutValidationError),
    MissingPrimaryLetterSequence(String),
}

impl fmt::Display for RankedReadinessError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Delegate to the inner validation error's own rendering.
            Self::InvalidLanguageLayout(inner) => fmt::Display::fmt(inner, f),
            Self::MissingPrimaryLetterSequence(language_key) => write!(
                f,
                "Language '{language_key}' has no usable primary letter sequence"
            ),
        }
    }
}
/// Static descriptor for one supported language: its dictionary asset,
/// compatible keyboard layouts, and progressive-unlock letter order.
#[derive(Clone, Copy, Debug)]
pub struct LanguagePack {
    // Stable lowercase key, e.g. "en", "de"; used for registry lookups.
    pub language_key: &'static str,
    // English display name for selector UIs.
    pub display_name: &'static str,
    // Writing system; always Script::Latin in the current registry.
    pub script: Script,
    // Base name of the embedded dictionary JSON asset, e.g. "words-en".
    pub dictionary_asset_id: &'static str,
    // Layout keys valid for this language; the first entry is the default
    // (see `default_keyboard_layout_for_language`).
    pub supported_keyboard_layout_keys: &'static [&'static str],
    // Letters in unlock order — presumably frequency-ordered per language;
    // TODO(review): confirm against the generator that consumes it.
    pub primary_letter_sequence: &'static str,
    // Availability gate; `Blocked` packs fail validation.
    pub support_level: SupportLevel,
}
pub const DEFAULT_LATIN_PRIMARY_SEQUENCE: &str = "etaoinshrdlcumwfgypbvkjxqz";
const DE_PRIMARY_SEQUENCE: &str = "entrishlagcubdmfokwzüpävößjqxy";
const ES_PRIMARY_SEQUENCE: &str = "aerosintcdlmupbgvófhíjáézqyxñú";
const FR_PRIMARY_SEQUENCE: &str = "erisantoucélpmdvgfbhqzxèyjç";
const IT_PRIMARY_SEQUENCE: &str = "aieortnsclmpdugvfbzhqkyxjw";
const PT_PRIMARY_SEQUENCE: &str = "aeorsitncdmulpvgbfhçãáqíxzjéóõêúâôà";
const NL_PRIMARY_SEQUENCE: &str = "enratiosldgkvuhpmbcjwfzyxq";
const SV_PRIMARY_SEQUENCE: &str = "aertnsldkigoämvbfuöphåyjcxw";
const DA_PRIMARY_SEQUENCE: &str = "ertnsildagokmfvubpæhøyjåcwzxq";
const NB_PRIMARY_SEQUENCE: &str = "ertnsilakogdmpvfubjøyhåæcw";
const FI_PRIMARY_SEQUENCE: &str = "aitneslkuäomvrphyjdögfbcwxzq";
const PL_PRIMARY_SEQUENCE: &str = "aiezornwsycpdkmtułjlbęgćąśhóżfńź";
const CS_PRIMARY_SEQUENCE: &str = "oelantipvdsurmkhíázcěbyřjčýšéžůúfťgňďxó";
const RO_PRIMARY_SEQUENCE: &str = "eiartnuclosăpmdgvbzfîâhjțșx";
const HR_PRIMARY_SEQUENCE: &str = "aitoernspjlkuvdmzbgcčšžćhfđ";
const HU_PRIMARY_SEQUENCE: &str = "etalnskriozáémgdvbyjhpuföóőícüúűwxq";
const LT_PRIMARY_SEQUENCE: &str = "iasteuknrolmpdvgėjyšbžąųįūčęzcfh";
const LV_PRIMARY_SEQUENCE: &str = "asiternlkopmuādīvzēgjbcšfūņļķģžhč";
const SL_PRIMARY_SEQUENCE: &str = "aeiotnrsvpkldjzmučbgcšžhf";
const ET_PRIMARY_SEQUENCE: &str = "aeistulmnkrovpdhgäjõüböfš";
const TR_PRIMARY_SEQUENCE: &str = "aeinrlımkdysutobşzügğcçöhpvfj";
const EN_LAYOUTS: &[&str] = &["qwerty", "dvorak", "colemak"];
const DE_LAYOUTS: &[&str] = &["de_qwertz", "qwerty"];
const FR_LAYOUTS: &[&str] = &["fr_azerty", "qwerty"];
const ES_LAYOUTS: &[&str] = &["es_intl", "qwerty"];
const IT_LAYOUTS: &[&str] = &["it_intl", "qwerty"];
const PT_LAYOUTS: &[&str] = &["pt_intl", "qwerty"];
const NL_LAYOUTS: &[&str] = &["nl_intl", "qwerty"];
const SV_LAYOUTS: &[&str] = &["sv_intl", "qwerty"];
const DA_LAYOUTS: &[&str] = &["da_intl", "qwerty"];
const NB_LAYOUTS: &[&str] = &["nb_intl", "qwerty"];
const FI_LAYOUTS: &[&str] = &["fi_intl", "qwerty"];
const PL_LAYOUTS: &[&str] = &["pl_intl", "qwerty"];
const CS_LAYOUTS: &[&str] = &["cs_intl", "qwerty"];
const RO_LAYOUTS: &[&str] = &["ro_intl", "qwerty"];
const HR_LAYOUTS: &[&str] = &["hr_intl", "qwerty"];
const HU_LAYOUTS: &[&str] = &["hu_intl", "qwerty"];
const LT_LAYOUTS: &[&str] = &["lt_intl", "qwerty"];
const LV_LAYOUTS: &[&str] = &["lv_intl", "qwerty"];
const SL_LAYOUTS: &[&str] = &["sl_intl", "qwerty"];
const ET_LAYOUTS: &[&str] = &["et_intl", "qwerty"];
const TR_LAYOUTS: &[&str] = &["tr_intl", "qwerty"];
// Seed registry for phase 0. Support levels will be tightened as keyboard
// profiles and Unicode handling phases are implemented.
static LANGUAGE_PACKS: &[LanguagePack] = &[
LanguagePack {
language_key: "en",
display_name: "English",
script: Script::Latin,
dictionary_asset_id: "words-en",
supported_keyboard_layout_keys: EN_LAYOUTS,
primary_letter_sequence: DEFAULT_LATIN_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "de",
display_name: "German",
script: Script::Latin,
dictionary_asset_id: "words-de",
supported_keyboard_layout_keys: DE_LAYOUTS,
primary_letter_sequence: DE_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "es",
display_name: "Spanish",
script: Script::Latin,
dictionary_asset_id: "words-es",
supported_keyboard_layout_keys: ES_LAYOUTS,
primary_letter_sequence: ES_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "fr",
display_name: "French",
script: Script::Latin,
dictionary_asset_id: "words-fr",
supported_keyboard_layout_keys: FR_LAYOUTS,
primary_letter_sequence: FR_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "it",
display_name: "Italian",
script: Script::Latin,
dictionary_asset_id: "words-it",
supported_keyboard_layout_keys: IT_LAYOUTS,
primary_letter_sequence: IT_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "pt",
display_name: "Portuguese",
script: Script::Latin,
dictionary_asset_id: "words-pt",
supported_keyboard_layout_keys: PT_LAYOUTS,
primary_letter_sequence: PT_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "nl",
display_name: "Dutch",
script: Script::Latin,
dictionary_asset_id: "words-nl",
supported_keyboard_layout_keys: NL_LAYOUTS,
primary_letter_sequence: NL_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "sv",
display_name: "Swedish",
script: Script::Latin,
dictionary_asset_id: "words-sv",
supported_keyboard_layout_keys: SV_LAYOUTS,
primary_letter_sequence: SV_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "da",
display_name: "Danish",
script: Script::Latin,
dictionary_asset_id: "words-da",
supported_keyboard_layout_keys: DA_LAYOUTS,
primary_letter_sequence: DA_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "nb",
display_name: "Norwegian Bokmal",
script: Script::Latin,
dictionary_asset_id: "words-nb",
supported_keyboard_layout_keys: NB_LAYOUTS,
primary_letter_sequence: NB_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "fi",
display_name: "Finnish",
script: Script::Latin,
dictionary_asset_id: "words-fi",
supported_keyboard_layout_keys: FI_LAYOUTS,
primary_letter_sequence: FI_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "pl",
display_name: "Polish",
script: Script::Latin,
dictionary_asset_id: "words-pl",
supported_keyboard_layout_keys: PL_LAYOUTS,
primary_letter_sequence: PL_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "cs",
display_name: "Czech",
script: Script::Latin,
dictionary_asset_id: "words-cs",
supported_keyboard_layout_keys: CS_LAYOUTS,
primary_letter_sequence: CS_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "ro",
display_name: "Romanian",
script: Script::Latin,
dictionary_asset_id: "words-ro",
supported_keyboard_layout_keys: RO_LAYOUTS,
primary_letter_sequence: RO_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "hr",
display_name: "Croatian",
script: Script::Latin,
dictionary_asset_id: "words-hr",
supported_keyboard_layout_keys: HR_LAYOUTS,
primary_letter_sequence: HR_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "hu",
display_name: "Hungarian",
script: Script::Latin,
dictionary_asset_id: "words-hu",
supported_keyboard_layout_keys: HU_LAYOUTS,
primary_letter_sequence: HU_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "lt",
display_name: "Lithuanian",
script: Script::Latin,
dictionary_asset_id: "words-lt",
supported_keyboard_layout_keys: LT_LAYOUTS,
primary_letter_sequence: LT_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "lv",
display_name: "Latvian",
script: Script::Latin,
dictionary_asset_id: "words-lv",
supported_keyboard_layout_keys: LV_LAYOUTS,
primary_letter_sequence: LV_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "sl",
display_name: "Slovene",
script: Script::Latin,
dictionary_asset_id: "words-sl",
supported_keyboard_layout_keys: SL_LAYOUTS,
primary_letter_sequence: SL_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "et",
display_name: "Estonian",
script: Script::Latin,
dictionary_asset_id: "words-et",
supported_keyboard_layout_keys: ET_LAYOUTS,
primary_letter_sequence: ET_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
LanguagePack {
language_key: "tr",
display_name: "Turkish",
script: Script::Latin,
dictionary_asset_id: "words-tr",
supported_keyboard_layout_keys: TR_LAYOUTS,
primary_letter_sequence: TR_PRIMARY_SEQUENCE,
support_level: SupportLevel::Full,
},
];
/// Returns the full static registry of language packs.
pub fn language_packs() -> &'static [LanguagePack] {
    LANGUAGE_PACKS
}
/// Looks up a language pack by its stable key (e.g. "en").
pub fn find_language_pack(language_key: &str) -> Option<&'static LanguagePack> {
    for pack in LANGUAGE_PACKS {
        if pack.language_key == language_key {
            return Some(pack);
        }
    }
    None
}
/// Language keys whose packs are fully supported, computed once and cached
/// for the lifetime of the process.
pub fn supported_dictionary_languages() -> &'static [&'static str] {
    static SUPPORTED: OnceLock<Vec<&'static str>> = OnceLock::new();
    let keys = SUPPORTED.get_or_init(|| {
        let mut keys = Vec::new();
        for pack in LANGUAGE_PACKS {
            if pack.support_level == SupportLevel::Full {
                keys.push(pack.language_key);
            }
        }
        keys
    });
    keys.as_slice()
}
/// Language keys whose pairing with `layout_key` validates as `Enabled`.
/// Unknown layouts simply produce an empty list (validation errors are dropped).
pub fn dictionary_languages_for_layout(layout_key: &str) -> Vec<&'static str> {
    let mut keys = Vec::new();
    for pack in LANGUAGE_PACKS {
        let verdict = validate_language_layout_pair(pack.language_key, layout_key);
        if matches!(verdict, Ok(CapabilityState::Enabled)) {
            keys.push(pack.language_key);
        }
    }
    keys
}
/// First (preferred) keyboard layout registered for the language, or `None`
/// when the language is unknown or declares no layouts.
pub fn default_keyboard_layout_for_language(language_key: &str) -> Option<&'static str> {
    find_language_pack(language_key)
        .and_then(|pack| pack.supported_keyboard_layout_keys.first())
        .copied()
}
/// Validates a (language, layout) pair against the registry.
///
/// Error precedence: unknown language, then unknown layout, then a language
/// blocked by its support level. Cross-language pairs (e.g. "en" on
/// "de_qwertz") are deliberately permitted.
pub fn validate_language_layout_pair(
    language_key: &str,
    layout_key: &str,
) -> Result<CapabilityState, LanguageLayoutValidationError> {
    let pack = find_language_pack(language_key).ok_or_else(|| {
        LanguageLayoutValidationError::UnknownLanguage(language_key.to_string())
    })?;
    if !KeyboardModel::supported_layout_keys().contains(&layout_key) {
        return Err(LanguageLayoutValidationError::UnknownLayout(
            layout_key.to_string(),
        ));
    }
    match pack.support_level {
        SupportLevel::Blocked => Err(
            LanguageLayoutValidationError::LanguageBlockedBySupportLevel(language_key.to_string()),
        ),
        _ => Ok(CapabilityState::Enabled),
    }
}
/// Reduces `sequence` to its alphabetic characters, keeping first-seen order
/// and dropping repeats. Case-sensitive: 'b' and 'B' are distinct entries.
pub fn normalized_primary_letter_sequence(sequence: &str) -> Vec<char> {
    sequence
        .chars()
        .filter(|ch| ch.is_alphabetic())
        .fold(Vec::new(), |mut letters, ch| {
            // Linear dedupe is fine: primary sequences are short.
            if !letters.contains(&ch) {
                letters.push(ch);
            }
            letters
        })
}
/// True when `sequence` contains at least one alphabetic character — i.e.
/// when `normalized_primary_letter_sequence(sequence)` would be non-empty.
///
/// Short-circuits on the first letter instead of allocating and deduplicating
/// the full normalized sequence just to test emptiness.
pub fn has_usable_primary_letter_sequence(sequence: &str) -> bool {
    sequence.chars().any(|ch| ch.is_alphabetic())
}
/// Checks whether a (language, layout) pair is ready for ranked adaptive
/// drills: the pair must pass registry validation and the language pack must
/// declare a usable (non-empty) primary letter sequence.
///
/// # Errors
/// - `InvalidLanguageLayout` when the pair fails registry validation.
/// - `MissingPrimaryLetterSequence` when the pack's primary letter sequence
///   contains no alphabetic characters.
pub fn ranked_adaptive_readiness(
    language_key: &str,
    layout_key: &str,
) -> Result<(), RankedReadinessError> {
    // Resolve the pack once up front. A missing pack maps to the same
    // `UnknownLanguage` failure that validation would report, which removes
    // the previous duplicated lookup and its unreachable fallback branch.
    let Some(pack) = find_language_pack(language_key) else {
        return Err(RankedReadinessError::InvalidLanguageLayout(
            LanguageLayoutValidationError::UnknownLanguage(language_key.to_string()),
        ));
    };
    validate_language_layout_pair(language_key, layout_key)
        .map_err(RankedReadinessError::InvalidLanguageLayout)?;
    if !has_usable_primary_letter_sequence(pack.primary_letter_sequence) {
        return Err(RankedReadinessError::MissingPrimaryLetterSequence(
            language_key.to_string(),
        ));
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::*;

    /// Enumerates every (language, layout) pair across the registry and all
    /// supported layouts that validates as `Enabled`.
    fn enabled_pairs() -> Vec<(&'static str, &'static str)> {
        let mut pairs = Vec::new();
        for pack in language_packs() {
            for &layout_key in KeyboardModel::supported_layout_keys() {
                if matches!(
                    validate_language_layout_pair(pack.language_key, layout_key),
                    Ok(CapabilityState::Enabled)
                ) {
                    pairs.push((pack.language_key, layout_key));
                }
            }
        }
        pairs
    }

    // Registry invariants: unique keys, minimum sequence length, non-empty
    // asset ids and layout lists, and (currently) Latin-only scripts.
    #[test]
    fn language_pack_keys_are_unique() {
        let mut seen = HashSet::new();
        for pack in language_packs() {
            assert!(seen.insert(pack.language_key));
            assert!(pack.primary_letter_sequence.len() >= 10);
            assert!(!pack.dictionary_asset_id.is_empty());
            assert!(!pack.supported_keyboard_layout_keys.is_empty());
            assert!(matches!(pack.script, Script::Latin));
        }
    }

    #[test]
    fn english_pack_exists_and_is_full() {
        let en = find_language_pack("en").expect("missing en language pack");
        assert_eq!(en.support_level, SupportLevel::Full);
        assert_eq!(en.primary_letter_sequence, DEFAULT_LATIN_PRIMARY_SEQUENCE);
        // English uses the frequency-ordered default sequence ("etaoin…").
        assert!(en.primary_letter_sequence.starts_with("etaoin"));
    }

    #[test]
    fn german_pack_primary_sequence_contains_locale_letters() {
        let de = find_language_pack("de").expect("missing de language pack");
        assert!(de.primary_letter_sequence.contains('ä'));
        assert!(de.primary_letter_sequence.contains('ö'));
        assert!(de.primary_letter_sequence.contains('ü'));
        assert!(de.primary_letter_sequence.contains('ß'));
    }

    #[test]
    fn non_english_packs_have_language_specific_primary_sequences() {
        for pack in language_packs() {
            if pack.language_key == "en" {
                continue;
            }
            assert_ne!(
                pack.primary_letter_sequence, DEFAULT_LATIN_PRIMARY_SEQUENCE,
                "language {} should not reuse default English sequence",
                pack.language_key
            );
        }
    }

    // Every non-ASCII primary letter must map to a physical key on the
    // language's native layout (ASCII lowercase is assumed typeable).
    #[test]
    fn locale_letters_are_typeable_on_language_native_layouts() {
        for pack in language_packs() {
            if pack.language_key == "en" {
                continue;
            }
            let native_layout_key = match pack.language_key {
                "de" => "de_qwertz".to_string(),
                "fr" => "fr_azerty".to_string(),
                key => format!("{key}_intl"),
            };
            let model = KeyboardModel::from_key(&native_layout_key)
                .expect("native layout key should map to a keyboard model");
            for ch in normalized_primary_letter_sequence(pack.primary_letter_sequence) {
                if ch.is_ascii_lowercase() {
                    continue;
                }
                assert!(
                    model.physical_key_for(ch).is_some(),
                    "native layout {} should type locale letter '{}' for language {}",
                    native_layout_key,
                    ch,
                    pack.language_key
                );
            }
        }
    }

    #[test]
    fn supported_dictionary_languages_are_registry_backed() {
        for key in supported_dictionary_languages() {
            assert!(find_language_pack(key).is_some());
        }
    }

    #[test]
    fn supported_dictionary_languages_include_non_english_languages() {
        let supported = supported_dictionary_languages();
        assert!(supported.contains(&"en"));
        assert!(supported.contains(&"de"));
        assert!(supported.contains(&"es"));
    }

    #[test]
    fn validate_language_layout_pair_unknown_language() {
        let err = validate_language_layout_pair("zz", "qwerty").unwrap_err();
        assert!(matches!(
            err,
            LanguageLayoutValidationError::UnknownLanguage(_)
        ));
    }

    #[test]
    fn validate_language_layout_pair_unknown_layout() {
        let err = validate_language_layout_pair("en", "foo").unwrap_err();
        assert!(matches!(
            err,
            LanguageLayoutValidationError::UnknownLayout(_)
        ));
    }

    // Typing English text on a German layout (and similar mixes) is allowed.
    #[test]
    fn validate_language_layout_pair_allows_cross_language_layout_pair() {
        let state = validate_language_layout_pair("en", "de_qwertz")
            .expect("cross-language/layout pair should be allowed");
        assert_eq!(state, CapabilityState::Enabled);
    }

    #[test]
    fn dictionary_languages_for_layout_qwerty_contains_english() {
        let keys = dictionary_languages_for_layout("qwerty");
        assert!(keys.contains(&"en"));
    }

    #[test]
    fn dictionary_languages_for_layout_contains_full_language_set_for_supported_layouts() {
        let de = dictionary_languages_for_layout("de_qwertz");
        assert_eq!(de.len(), supported_dictionary_languages().len());
        assert!(de.contains(&"de"));
        let fr = dictionary_languages_for_layout("fr_azerty");
        assert_eq!(fr.len(), supported_dictionary_languages().len());
        assert!(fr.contains(&"fr"));
    }

    #[test]
    fn normalized_primary_sequence_filters_non_letters_and_dedupes() {
        assert_eq!(
            normalized_primary_letter_sequence("a1áa!bB"),
            vec!['a', 'á', 'b', 'B']
        );
    }

    #[test]
    fn usable_primary_sequence_requires_at_least_one_letter() {
        assert!(!has_usable_primary_letter_sequence("12345!?"));
        assert!(has_usable_primary_letter_sequence("é"));
    }

    #[test]
    fn ranked_adaptive_readiness_rejects_invalid_layout() {
        let err = ranked_adaptive_readiness("en", "not_a_layout").unwrap_err();
        assert!(matches!(
            err,
            RankedReadinessError::InvalidLanguageLayout(
                LanguageLayoutValidationError::UnknownLayout(_)
            )
        ));
    }

    // Readiness must hold for every pair the registry itself enables.
    #[test]
    fn ranked_adaptive_readiness_accepts_all_enabled_pairs() {
        for (language_key, layout_key) in enabled_pairs() {
            assert!(
                ranked_adaptive_readiness(language_key, layout_key).is_ok(),
                "expected readiness for pair: {language_key}+{layout_key}"
            );
        }
    }
}

2
src/l10n/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod language_pack;
pub mod unicode;

25
src/l10n/unicode.rs Normal file
View File

@@ -0,0 +1,25 @@
use icu_normalizer::ComposingNormalizerBorrowed;
/// Normalizes `input` to Unicode Normalization Form C (canonical
/// composition), so that precomposed characters (e.g. "é") and their
/// decomposed equivalents (e.g. "e" + U+0301) become byte-identical strings.
pub fn normalize_nfc(input: &str) -> String {
    ComposingNormalizerBorrowed::new_nfc()
        .normalize(input)
        .into_owned()
}
#[cfg(test)]
mod tests {
    use super::*;

    // NFC makes the precomposed and combining-mark spellings compare equal.
    #[test]
    fn normalize_nfc_composes_equivalent_unicode_sequences() {
        let composed = "é";
        let decomposed = "e\u{0301}";
        assert_eq!(normalize_nfc(composed), normalize_nfc(decomposed));
    }

    // Already-composed text and plain ASCII must pass through unchanged.
    #[test]
    fn normalize_nfc_is_stable_for_precomposed_and_ascii() {
        assert_eq!(normalize_nfc("Árvíztűrő"), "Árvíztűrő");
        assert_eq!(normalize_nfc("abc"), "abc");
    }
}

View File

@@ -8,6 +8,7 @@
pub mod config;
pub mod engine;
pub mod keyboard;
pub mod l10n;
pub mod session;
pub mod store;

File diff suppressed because it is too large Load Diff

View File

@@ -16,6 +16,13 @@ pub struct JsonStore {
}
impl JsonStore {
const STORE_FILES: [&'static str; 4] = [
"profile.json",
"key_stats.json",
"key_stats_ranked.json",
"lesson_history.json",
];
pub fn new() -> Result<Self> {
let base_dir = dirs::data_dir()
.unwrap_or_else(|| PathBuf::from("."))
@@ -34,6 +41,31 @@ impl JsonStore {
self.base_dir.join(name)
}
    /// Renames each existing store file (see `STORE_FILES`) to a
    /// `<name>.legacy` sibling, replacing any previous legacy archive.
    ///
    /// Best-effort: failures are reported on stderr and do not abort the
    /// remaining renames, so one bad file cannot block archiving the others.
    pub fn archive_legacy_data_files(&self) {
        for name in Self::STORE_FILES {
            let path = self.file_path(name);
            if !path.exists() {
                continue;
            }
            let legacy_path = self.file_path(&format!("{name}.legacy"));
            // Clear any stale archive first; "not found" is the normal case
            // and deliberately not treated as an error.
            if let Err(e) = fs::remove_file(&legacy_path)
                && e.kind() != std::io::ErrorKind::NotFound
            {
                eprintln!(
                    "warning: failed to remove old legacy archive {}: {e}",
                    legacy_path.display()
                );
            }
            if let Err(e) = fs::rename(&path, &legacy_path) {
                eprintln!(
                    "warning: failed to archive legacy store file {} -> {}: {e}",
                    path.display(),
                    legacy_path.display()
                );
            }
        }
    }
fn load<T: DeserializeOwned + Default>(&self, name: &str) -> T {
let path = self.file_path(name);
if path.exists() {
@@ -236,15 +268,9 @@ impl JsonStore {
/// Check for leftover .bak files from an interrupted import.
/// Returns true if recovery files were found (and cleaned up).
pub fn check_interrupted_import(&self) -> bool {
let bak_names = [
"profile.json.bak",
"key_stats.json.bak",
"key_stats_ranked.json.bak",
"lesson_history.json.bak",
];
let mut found = false;
for name in &bak_names {
let bak_path = self.base_dir.join(name);
for name in Self::STORE_FILES {
let bak_path = self.file_path(&format!("{name}.bak"));
if bak_path.exists() {
found = true;
let _ = fs::remove_file(&bak_path);
@@ -404,4 +430,19 @@ mod tests {
// Should have been cleaned up
assert!(!store.file_path("profile.json.bak").exists());
}
#[test]
fn test_archive_legacy_data_files_renames_known_store_files() {
let (_dir, store) = make_test_store();
fs::write(store.file_path("profile.json"), "{}").unwrap();
fs::write(store.file_path("key_stats.json"), "{}").unwrap();
store.archive_legacy_data_files();
assert!(!store.file_path("profile.json").exists());
assert!(store.file_path("profile.json.legacy").exists());
assert!(!store.file_path("key_stats.json").exists());
assert!(store.file_path("key_stats.json.legacy").exists());
}
}

View File

@@ -1,17 +1,23 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use crate::config::Config;
use crate::engine::key_stats::KeyStatsStore;
use crate::engine::skill_tree::SkillTreeProgress;
use crate::session::result::DrillResult;
const SCHEMA_VERSION: u32 = 2;
/// Current profile schema version; v3 adds the language-scoped skill tree
/// map (`skill_tree_by_language`). A stored mismatch triggers `needs_reset`.
pub const SCHEMA_VERSION: u32 = 3;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ProfileData {
pub schema_version: u32,
/// Legacy single-scope progress mirror retained for import/export compatibility.
/// Always write this via `set_skill_tree_for_language`, never directly.
pub skill_tree: SkillTreeProgress,
/// Language-scoped skill tree progression state keyed by dictionary language.
#[serde(default)]
pub skill_tree_by_language: HashMap<String, SkillTreeProgress>,
pub total_score: f64,
#[serde(alias = "total_lessons")]
pub total_drills: u32,
@@ -25,6 +31,7 @@ impl Default for ProfileData {
Self {
schema_version: SCHEMA_VERSION,
skill_tree: SkillTreeProgress::default(),
skill_tree_by_language: HashMap::new(),
total_score: 0.0,
total_drills: 0,
streak_days: 0,
@@ -39,6 +46,20 @@ impl ProfileData {
    /// True when the stored schema version differs from the current
    /// `SCHEMA_VERSION`, signalling that this profile must be reset.
    pub fn needs_reset(&self) -> bool {
        self.schema_version != SCHEMA_VERSION
    }
pub fn skill_tree_for_language(&self, language_key: &str) -> SkillTreeProgress {
self.skill_tree_by_language
.get(language_key)
.cloned()
.unwrap_or_else(|| self.skill_tree.clone())
}
pub fn set_skill_tree_for_language(&mut self, language_key: &str, progress: SkillTreeProgress) {
self.skill_tree_by_language
.insert(language_key.to_string(), progress.clone());
// Keep legacy mirror aligned with the current active scope.
self.skill_tree = progress;
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -87,3 +108,50 @@ pub struct ExportData {
pub ranked_key_stats: KeyStatsData,
pub drill_history: DrillHistoryData,
}
#[cfg(test)]
mod tests {
    use super::*;

    // With no scoped entry, lookups fall back to the legacy mirror (which is
    // the default progress on a fresh profile).
    #[test]
    fn profile_skill_tree_for_language_falls_back_to_legacy() {
        let profile = ProfileData::default();
        let scoped = profile.skill_tree_for_language("de");
        let lowercase = scoped
            .branches
            .get("lowercase")
            .expect("lowercase branch should exist");
        assert_eq!(lowercase.current_level, 0);
    }

    // Writing a scoped tree must update both the per-language map and the
    // legacy single-scope mirror.
    #[test]
    fn profile_set_skill_tree_for_language_updates_scoped_map() {
        let mut profile = ProfileData::default();
        let mut progress = SkillTreeProgress::default();
        progress
            .branches
            .get_mut("lowercase")
            .expect("lowercase branch should exist")
            .current_level = 3;
        profile.set_skill_tree_for_language("de", progress.clone());
        let loaded = profile.skill_tree_for_language("de");
        assert_eq!(
            loaded
                .branches
                .get("lowercase")
                .expect("lowercase branch should exist")
                .current_level,
            3
        );
        assert_eq!(
            profile
                .skill_tree
                .branches
                .get("lowercase")
                .expect("lowercase branch should exist")
                .current_level,
            3
        );
    }
}

View File

@@ -1,4 +1,5 @@
use std::collections::HashSet;
use std::collections::{HashMap, HashSet};
use std::sync::{Mutex, OnceLock};
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
@@ -6,7 +7,7 @@ use ratatui::style::{Color, Modifier, Style};
use ratatui::widgets::{Block, Widget};
use crate::keyboard::display::{self, BACKSPACE, ENTER, SPACE, TAB};
use crate::keyboard::model::KeyboardModel;
use crate::keyboard::model::{KeyboardModel, PhysicalKey};
use crate::ui::theme::Theme;
pub struct KeyboardDiagram<'a> {
@@ -21,6 +22,31 @@ pub struct KeyboardDiagram<'a> {
pub caps_lock: bool,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum KeyboardRenderMode {
Compact,
Full,
FullFallback,
}
#[derive(Clone, Debug)]
struct KeyboardGeometry {
key_width: u16,
row_offsets: Vec<u16>,
keyboard_width: u16,
start_inset: u16,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct GeometryCacheKey {
layout_key: String,
mode: KeyboardRenderMode,
width: u16,
height: u16,
}
const MAX_GEOMETRY_CACHE_ENTRIES: usize = 128;
impl<'a> KeyboardDiagram<'a> {
pub fn new(
next_key: Option<char>,
@@ -73,6 +99,154 @@ impl<'a> KeyboardDiagram<'a> {
}
}
/// Process-wide cache of computed keyboard geometries, keyed by
/// (layout, render mode, viewport size). Lazily initialized on first use.
fn geometry_cache() -> &'static Mutex<HashMap<GeometryCacheKey, KeyboardGeometry>> {
    static CACHE: OnceLock<Mutex<HashMap<GeometryCacheKey, KeyboardGeometry>>> = OnceLock::new();
    CACHE.get_or_init(|| Mutex::new(HashMap::new()))
}
/// Selects which physical rows a render mode lays out: the full row set for
/// `Full`, only the letter rows for `Compact` and `FullFallback`.
fn rows_for_mode<'a>(model: &'a KeyboardModel, mode: KeyboardRenderMode) -> &'a [Vec<PhysicalKey>] {
    if mode == KeyboardRenderMode::Full {
        &model.rows
    } else {
        model.letter_rows()
    }
}
/// Chooses the render mode for a viewport: compact when explicitly requested,
/// full when the area is at least 75x4, otherwise the letter-rows fallback.
fn render_mode_for(inner: Rect, compact: bool) -> KeyboardRenderMode {
    if compact {
        return KeyboardRenderMode::Compact;
    }
    let fits_full_keyboard = inner.height >= 4 && inner.width >= 75;
    if fits_full_keyboard {
        KeyboardRenderMode::Full
    } else {
        KeyboardRenderMode::FullFallback
    }
}
/// Computes the cell geometry for one render mode, or `None` when `inner` is
/// smaller than that mode's minimum viewport.
///
/// All widths and offsets are in terminal cells. The per-mode constants must
/// mirror the ones used by the corresponding render and hit-test code:
/// compact uses 3-cell keys, both full modes use 5-cell keys.
fn build_geometry(
    inner: Rect,
    model: &KeyboardModel,
    mode: KeyboardRenderMode,
) -> Option<KeyboardGeometry> {
    let rows = rows_for_mode(model, mode);
    match mode {
        KeyboardRenderMode::Compact => {
            if inner.height < 3 || inner.width < 21 {
                return None;
            }
            let key_width = 3;
            let row_offsets = vec![3, 4, 6];
            let keyboard_width = rows
                .iter()
                .enumerate()
                .map(|(row_idx, row)| {
                    let offset = row_offsets.get(row_idx).copied().unwrap_or(0);
                    // The trailing +3 reserves room for the bracketed action
                    // key drawn at the end of each compact row ([B]/[E]/[S]).
                    offset + row.len() as u16 * key_width + 3
                })
                .max()
                .unwrap_or(0);
            Some(KeyboardGeometry {
                key_width,
                row_offsets,
                keyboard_width,
                // Center the keyboard horizontally inside the viewport.
                start_inset: inner.width.saturating_sub(keyboard_width) / 2,
            })
        }
        KeyboardRenderMode::Full => {
            if inner.height < 4 || inner.width < 75 {
                return None;
            }
            let key_width = 5;
            let row_offsets = vec![0, 5, 5, 6];
            let keyboard_width = rows
                .iter()
                .enumerate()
                .map(|(row_idx, row)| {
                    let offset = row_offsets.get(row_idx).copied().unwrap_or(0);
                    let row_end = offset + row.len() as u16 * key_width;
                    // Extra cells for the wide action keys appended to
                    // specific rows: [Bksp] (row 0), [Enter] (row 2),
                    // [Shft] (row 3).
                    match row_idx {
                        0 => row_end + 6,
                        2 => row_end + 7,
                        3 => row_end + 6,
                        _ => row_end,
                    }
                })
                .max()
                .unwrap_or(0);
            Some(KeyboardGeometry {
                key_width,
                row_offsets,
                keyboard_width,
                start_inset: inner.width.saturating_sub(keyboard_width) / 2,
            })
        }
        KeyboardRenderMode::FullFallback => {
            if inner.height < 3 || inner.width < 30 {
                return None;
            }
            let key_width = 5;
            let row_offsets = vec![1, 3, 5];
            // Fallback draws letter rows only, with no trailing action keys.
            let keyboard_width = rows
                .iter()
                .enumerate()
                .map(|(row_idx, row)| {
                    let offset = row_offsets.get(row_idx).copied().unwrap_or(0);
                    offset + row.len() as u16 * key_width
                })
                .max()
                .unwrap_or(0);
            Some(KeyboardGeometry {
                key_width,
                row_offsets,
                keyboard_width,
                start_inset: inner.width.saturating_sub(keyboard_width) / 2,
            })
        }
    }
}
/// Returns the geometry for (layout, mode, viewport), consulting the shared
/// cache first and building + caching the entry on a miss.
///
/// The lock is released between the lookup and the insert, so two threads can
/// race to build the same entry; geometry construction is deterministic, so
/// the duplicate insert is harmless.
fn geometry_for_mode(
    inner: Rect,
    model: &KeyboardModel,
    mode: KeyboardRenderMode,
) -> Option<KeyboardGeometry> {
    let key = GeometryCacheKey {
        layout_key: model.layout_key.to_string(),
        mode,
        width: inner.width,
        height: inner.height,
    };
    if let Some(geom) = geometry_cache()
        .lock()
        .expect("keyboard geometry cache poisoned")
        .get(&key)
        .cloned()
    {
        return Some(geom);
    }
    // Miss: build outside the lock, then re-acquire to publish the result.
    let built = build_geometry(inner, model, mode)?;
    let mut cache = geometry_cache()
        .lock()
        .expect("keyboard geometry cache poisoned");
    if cache.len() >= MAX_GEOMETRY_CACHE_ENTRIES {
        // Bounded cache: simple full-clear avoids unbounded growth across long resize sessions.
        cache.clear();
    }
    cache.insert(key, built.clone());
    Some(built)
}
/// Convenience wrapper: resolves the render mode for this viewport, then
/// delegates to `geometry_for_mode`.
fn geometry_for(inner: Rect, model: &KeyboardModel, compact: bool) -> Option<KeyboardGeometry> {
    geometry_for_mode(inner, model, render_mode_for(inner, compact))
}
/// Whether a key cap should display its shifted glyph for the current
/// modifier state.
fn show_shifted_for_key(key: &PhysicalKey, shift_held: bool, caps_lock: bool) -> bool {
    match key.base.is_alphabetic() {
        // Caps Lock inverts the effective Shift state for letter keys.
        true => shift_held ^ caps_lock,
        // Non-letter keys follow Shift alone; Caps Lock has no effect.
        false => shift_held,
    }
}
fn brighten_color(color: Color) -> Color {
match color {
Color::Rgb(r, g, b) => Color::Rgb(
@@ -297,30 +471,13 @@ impl KeyboardDiagram<'_> {
fn render_compact(&self, inner: Rect, buf: &mut Buffer) {
let colors = &self.theme.colors;
let letter_rows = self.model.letter_rows();
let key_width: u16 = 3;
let min_width: u16 = 21;
if inner.height < 3 || inner.width < min_width {
let Some(geometry) = geometry_for_mode(inner, self.model, KeyboardRenderMode::Compact)
else {
return;
}
let offsets: &[u16] = &[3, 4, 6];
let keyboard_width = letter_rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 3, // [B]
1 => row_end + 3, // [E]
2 => row_end + 3, // [S]
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
};
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in letter_rows.iter().enumerate() {
let y = inner.y + row_idx as u16;
@@ -353,12 +510,8 @@ impl KeyboardDiagram<'_> {
break;
}
// Caps lock inverts shift for alpha keys only
let show_shifted = if physical_key.base.is_ascii_alphabetic() {
self.shift_held ^ self.caps_lock
} else {
self.shift_held
};
let show_shifted =
show_shifted_for_key(physical_key, self.shift_held, self.caps_lock);
let display_char = if show_shifted {
physical_key.shifted
} else {
@@ -403,7 +556,7 @@ impl KeyboardDiagram<'_> {
}
// Backspace at end of first row
if inner.height >= 3 {
if inner.height >= 3 && !letter_rows.is_empty() {
let y = inner.y;
let row_end_x = start_x + offsets[0] + letter_rows[0].len() as u16 * key_width;
if row_end_x + 3 <= inner.x + inner.width {
@@ -418,33 +571,17 @@ impl KeyboardDiagram<'_> {
fn render_full(&self, inner: Rect, buf: &mut Buffer) {
let colors = &self.theme.colors;
let key_width: u16 = 5;
let min_width: u16 = 75;
if inner.height < 4 || inner.width < min_width {
let Some(geometry) = geometry_for(inner, self.model, false) else {
return;
};
if render_mode_for(inner, false) != KeyboardRenderMode::Full {
self.render_full_fallback(inner, buf);
return;
}
let offsets: &[u16] = &[0, 5, 5, 6];
let keyboard_width = self
.model
.rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 6, // [Bksp]
2 => row_end + 7, // [Enter]
3 => row_end + 6, // [Shft]
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let keyboard_width = geometry.keyboard_width;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in self.model.rows.iter().enumerate() {
let y = inner.y + row_idx as u16;
@@ -496,12 +633,8 @@ impl KeyboardDiagram<'_> {
break;
}
// Caps lock inverts shift for alpha keys only
let show_shifted = if physical_key.base.is_ascii_alphabetic() {
self.shift_held ^ self.caps_lock
} else {
self.shift_held
};
let show_shifted =
show_shifted_for_key(physical_key, self.shift_held, self.caps_lock);
let display_char = if show_shifted {
physical_key.shifted
} else {
@@ -576,22 +709,13 @@ impl KeyboardDiagram<'_> {
fn render_full_fallback(&self, inner: Rect, buf: &mut Buffer) {
let colors = &self.theme.colors;
let letter_rows = self.model.letter_rows();
let key_width: u16 = 5;
let offsets: &[u16] = &[1, 3, 5];
let keyboard_width = letter_rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
offset + row.len() as u16 * key_width
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
if inner.height < 3 || inner.width < 30 {
let Some(geometry) = geometry_for_mode(inner, self.model, KeyboardRenderMode::FullFallback)
else {
return;
}
};
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in letter_rows.iter().enumerate() {
let y = inner.y + row_idx as u16;
@@ -607,12 +731,8 @@ impl KeyboardDiagram<'_> {
break;
}
// Caps lock inverts shift for alpha keys only
let show_shifted = if physical_key.base.is_ascii_alphabetic() {
self.shift_held ^ self.caps_lock
} else {
self.shift_held
};
let show_shifted =
show_shifted_for_key(physical_key, self.shift_held, self.caps_lock);
let display_char = if show_shifted {
physical_key.shifted
} else {
@@ -641,30 +761,11 @@ fn rect_contains(area: Rect, x: u16, y: u16) -> bool {
}
fn key_at_compact_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -> Option<char> {
let geometry = geometry_for_mode(inner, model, KeyboardRenderMode::Compact)?;
let letter_rows = model.letter_rows();
let key_width: u16 = 3;
let min_width: u16 = 21;
if inner.height < 3 || inner.width < min_width {
return None;
}
let offsets: &[u16] = &[3, 4, 6];
let keyboard_width = letter_rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 3,
1 => row_end + 3,
2 => row_end + 3,
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in letter_rows.iter().enumerate() {
let row_y = inner.y + row_idx as u16;
@@ -727,30 +828,13 @@ fn key_at_compact_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -
}
fn shift_at_compact_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -> bool {
let letter_rows = model.letter_rows();
let key_width: u16 = 3;
let min_width: u16 = 21;
if inner.height < 3 || inner.width < min_width {
let Some(geometry) = geometry_for_mode(inner, model, KeyboardRenderMode::Compact) else {
return false;
}
let offsets: &[u16] = &[3, 4, 6];
let keyboard_width = letter_rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 3,
1 => row_end + 3,
2 => row_end + 3,
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
};
let letter_rows = model.letter_rows();
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
let shift_row_y = inner.y + 2;
if y != shift_row_y {
return false;
@@ -759,6 +843,9 @@ fn shift_at_compact_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16)
if rect_contains(left_shift, x, y) {
return true;
}
if letter_rows.len() <= 2 {
return false;
}
let offset = offsets[2];
let row_end_x = start_x + offset + letter_rows[2].len() as u16 * key_width;
let right_shift = Rect::new(row_end_x, shift_row_y, 3, 1);
@@ -766,25 +853,11 @@ fn shift_at_compact_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16)
}
fn key_at_full_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -> Option<char> {
let key_width: u16 = 5;
let offsets: &[u16] = &[0, 5, 5, 6];
let keyboard_width = model
.rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 6,
2 => row_end + 7,
3 => row_end + 6,
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
let geometry = geometry_for_mode(inner, model, KeyboardRenderMode::Full)?;
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let keyboard_width = geometry.keyboard_width;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in model.rows.iter().enumerate() {
let row_y = inner.y + row_idx as u16;
@@ -862,25 +935,12 @@ fn key_at_full_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -> O
}
fn shift_at_full_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) -> bool {
let key_width: u16 = 5;
let offsets: &[u16] = &[0, 5, 5, 6];
let keyboard_width = model
.rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
let row_end = offset + row.len() as u16 * key_width;
match row_idx {
0 => row_end + 6,
2 => row_end + 7,
3 => row_end + 6,
_ => row_end,
}
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
let Some(geometry) = geometry_for_mode(inner, model, KeyboardRenderMode::Full) else {
return false;
};
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
let shift_row_y = inner.y + 3;
if y != shift_row_y {
return false;
@@ -890,6 +950,9 @@ fn shift_at_full_position(inner: Rect, model: &KeyboardModel, x: u16, y: u16) ->
if rect_contains(left_shift, x, y) {
return true;
}
if model.rows.len() <= 3 {
return false;
}
let offset = offsets[3];
let row_end_x = start_x + offset + model.rows[3].len() as u16 * key_width;
let right_shift = Rect::new(row_end_x, shift_row_y, 6, 1);
@@ -902,23 +965,11 @@ fn key_at_full_fallback_position(
x: u16,
y: u16,
) -> Option<char> {
let geometry = geometry_for_mode(inner, model, KeyboardRenderMode::FullFallback)?;
let letter_rows = model.letter_rows();
let key_width: u16 = 5;
let offsets: &[u16] = &[1, 3, 5];
let keyboard_width = letter_rows
.iter()
.enumerate()
.map(|(row_idx, row)| {
let offset = offsets.get(row_idx).copied().unwrap_or(0);
offset + row.len() as u16 * key_width
})
.max()
.unwrap_or(0);
let start_x = inner.x + inner.width.saturating_sub(keyboard_width) / 2;
if inner.height < 3 || inner.width < 30 {
return None;
}
let key_width = geometry.key_width;
let offsets = &geometry.row_offsets;
let start_x = inner.x + geometry.start_inset;
for (row_idx, row) in letter_rows.iter().enumerate() {
let row_y = inner.y + row_idx as u16;
@@ -940,3 +991,183 @@ fn key_at_full_fallback_position(
/// The fallback layout renders no Shift keys, so no position can ever hit one.
fn shift_at_full_fallback_position(_inner: Rect, _model: &KeyboardModel, _x: u16, _y: u16) -> bool {
    false
}
/// Test-only: current number of cached geometry entries.
#[cfg(test)]
fn geometry_cache_len() -> usize {
    geometry_cache()
        .lock()
        .expect("keyboard geometry cache poisoned")
        .len()
}
/// Test-only: empties the geometry cache so tests start from a known state.
#[cfg(test)]
fn geometry_cache_clear() {
    geometry_cache()
        .lock()
        .expect("keyboard geometry cache poisoned")
        .clear();
}
/// Test-only: counts cached entries matching an exact
/// (layout, mode, width, height) key.
#[cfg(test)]
fn geometry_cache_matching_entries(
    layout_key: &str,
    mode: KeyboardRenderMode,
    width: u16,
    height: u16,
) -> usize {
    geometry_cache()
        .lock()
        .expect("keyboard geometry cache poisoned")
        .keys()
        .filter(|k| {
            k.layout_key == layout_key && k.mode == mode && k.width == width && k.height == height
        })
        .count()
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Mutex, OnceLock};

    /// Serializes cache-touching tests: the geometry cache is process-global,
    /// so concurrent tests would otherwise see each other's entries.
    fn cache_test_lock() -> &'static Mutex<()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(()))
    }

    /// Asserts that for every key of every row in `mode`, hit-testing the
    /// key's computed top-left cell resolves back to that key's base char.
    fn assert_roundtrip_for_render_mode(
        model: &KeyboardModel,
        area: Rect,
        compact: bool,
        mode: KeyboardRenderMode,
    ) {
        let inner = Block::bordered().inner(area);
        let geometry =
            geometry_for_mode(inner, model, mode).expect("expected geometry for test render mode");
        let rows = rows_for_mode(model, mode);
        for (row_idx, row) in rows.iter().enumerate() {
            let row_y = inner.y + row_idx as u16;
            let offset = geometry.row_offsets.get(row_idx).copied().unwrap_or(0);
            for (col_idx, key) in row.iter().enumerate() {
                let key_x =
                    inner.x + geometry.start_inset + offset + col_idx as u16 * geometry.key_width;
                let hit_x = key_x;
                let hit_y = row_y;
                let hit = KeyboardDiagram::key_at_position(area, model, compact, hit_x, hit_y);
                assert_eq!(
                    hit,
                    Some(key.base),
                    "round-trip hit-test mismatch for layout={}, mode={mode:?}, row={row_idx}, col={col_idx}, key={}",
                    model.layout_key,
                    key.base
                );
            }
        }
    }

    // Repeated hit-tests on the same layout/viewport must reuse one entry.
    #[test]
    fn geometry_cache_reuses_entries_for_same_layout_mode_and_viewport() {
        let _guard = cache_test_lock()
            .lock()
            .expect("cache test lock should not be poisoned");
        geometry_cache_clear();
        let model = KeyboardModel::from_key("qwerty").expect("qwerty model must exist");
        let area = Rect::new(0, 0, 100, 10);
        let inner = Block::bordered().inner(area);
        let mode = render_mode_for(inner, false);
        let _ = KeyboardDiagram::key_at_position(area, &model, false, 10, 2);
        assert_eq!(
            geometry_cache_matching_entries(model.layout_key, mode, inner.width, inner.height),
            1
        );
        for _ in 0..50 {
            let _ = KeyboardDiagram::key_at_position(area, &model, false, 12, 2);
            let _ = KeyboardDiagram::shift_at_position(area, &model, false, 5, 3);
        }
        assert_eq!(
            geometry_cache_matching_entries(model.layout_key, mode, inner.width, inner.height),
            1,
            "expected exactly one cached geometry entry for repeated same key"
        );
    }

    // Different layouts and different viewport sizes get distinct cache keys.
    #[test]
    fn geometry_cache_distinguishes_layout_and_viewport_keys() {
        let _guard = cache_test_lock()
            .lock()
            .expect("cache test lock should not be poisoned");
        geometry_cache_clear();
        let qwerty = KeyboardModel::from_key("qwerty").expect("qwerty model must exist");
        let azerty = KeyboardModel::from_key("fr_azerty").expect("fr_azerty model must exist");
        let _ = KeyboardDiagram::key_at_position(Rect::new(0, 0, 100, 10), &qwerty, false, 8, 2);
        let after_first = geometry_cache_len();
        assert!(after_first >= 1);
        let _ = KeyboardDiagram::key_at_position(Rect::new(0, 0, 120, 10), &qwerty, false, 8, 2);
        let _ = KeyboardDiagram::key_at_position(Rect::new(0, 0, 100, 10), &azerty, false, 8, 2);
        assert!(
            geometry_cache_len() >= after_first + 2,
            "expected separate cached geometry entries for viewport/layout changes"
        );
    }

    // Filling the cache past capacity must never exceed the bound.
    #[test]
    fn geometry_cache_is_bounded() {
        let _guard = cache_test_lock()
            .lock()
            .expect("cache test lock should not be poisoned");
        geometry_cache_clear();
        let model = KeyboardModel::from_key("qwerty").expect("qwerty model must exist");
        for i in 0..(MAX_GEOMETRY_CACHE_ENTRIES as u16 + 10) {
            let width = 90 + i;
            let area = Rect::new(0, 0, width, 10);
            let _ = KeyboardDiagram::key_at_position(area, &model, false, 10, 2);
        }
        assert!(
            geometry_cache_len() <= MAX_GEOMETRY_CACHE_ENTRIES,
            "geometry cache exceeded bounded capacity"
        );
    }

    // Render geometry and hit-testing must agree in all three render modes,
    // for every supported layout.
    #[test]
    fn hit_test_roundtrip_invariants_hold_for_all_layouts() {
        let _guard = cache_test_lock()
            .lock()
            .expect("cache test lock should not be poisoned");
        for &layout_key in KeyboardModel::supported_layout_keys() {
            let model = KeyboardModel::from_key(layout_key).expect("profile should exist");
            // Full render mode.
            assert_roundtrip_for_render_mode(
                &model,
                Rect::new(0, 0, 100, 10),
                false,
                KeyboardRenderMode::Full,
            );
            // Full fallback mode (non-compact, but too small for full keyboard).
            assert_roundtrip_for_render_mode(
                &model,
                Rect::new(0, 0, 60, 8),
                false,
                KeyboardRenderMode::FullFallback,
            );
            // Compact mode.
            assert_roundtrip_for_render_mode(
                &model,
                Rect::new(0, 0, 60, 8),
                true,
                KeyboardRenderMode::Compact,
            );
        }
    }
}

View File

@@ -37,6 +37,13 @@ impl<'a> SkillTreeWidget<'a> {
}
}
fn locked_branch_notice(skill_tree: &SkillTreeEngine) -> String {
format!(
"Complete {} primary letters to unlock branches",
skill_tree.primary_letters().len()
)
}
/// Get the list of selectable branch IDs (Lowercase first, then other branches).
pub fn selectable_branches() -> Vec<BranchId> {
vec![
@@ -59,8 +66,21 @@ pub fn detail_line_count(branch_id: BranchId) -> usize {
.sum::<usize>()
}
pub fn detail_line_count_with_level_spacing(branch_id: BranchId, level_spacing: bool) -> usize {
let base = detail_line_count(branch_id);
pub fn detail_line_count_for_tree(skill_tree: &SkillTreeEngine, branch_id: BranchId) -> usize {
if branch_id == BranchId::Lowercase {
// 1 branch header + 1 level header + one line per primary letter.
1 + 1 + skill_tree.primary_letters().len()
} else {
detail_line_count(branch_id)
}
}
pub fn detail_line_count_with_level_spacing_for_tree(
skill_tree: &SkillTreeEngine,
branch_id: BranchId,
level_spacing: bool,
) -> usize {
let base = detail_line_count_for_tree(skill_tree, branch_id);
if !level_spacing {
return base;
}
@@ -68,6 +88,7 @@ pub fn detail_line_count_with_level_spacing(branch_id: BranchId, level_spacing:
base + def.levels.len().saturating_sub(1)
}
#[cfg(test)]
pub fn use_expanded_level_spacing(detail_area_height: u16, branch_id: BranchId) -> bool {
let def = get_branch_definition(branch_id);
let base = detail_line_count(branch_id);
@@ -75,6 +96,17 @@ pub fn use_expanded_level_spacing(detail_area_height: u16, branch_id: BranchId)
(detail_area_height as usize) >= base + extra
}
pub fn use_expanded_level_spacing_for_tree(
skill_tree: &SkillTreeEngine,
detail_area_height: u16,
branch_id: BranchId,
) -> bool {
let def = get_branch_definition(branch_id);
let base = detail_line_count_for_tree(skill_tree, branch_id);
let extra = def.levels.len().saturating_sub(1);
(detail_area_height as usize) >= base + extra
}
pub fn use_side_by_side_layout(inner_width: u16) -> bool {
inner_width >= 100
}
@@ -107,7 +139,8 @@ impl Widget for SkillTreeWidget<'_> {
// Layout: main split (branch list + detail) and footer (adaptive height)
let branches = selectable_branches();
let (footer_hints, footer_notice) = if self.selected < branches.len() {
let (footer_hints, footer_notice): (Vec<&str>, Option<String>) =
if self.selected < branches.len() {
let bp = self.skill_tree.branch_progress(branches[self.selected]);
if *self.skill_tree.branch_status(branches[self.selected]) == BranchStatus::Locked {
(
@@ -116,7 +149,7 @@ impl Widget for SkillTreeWidget<'_> {
"[PgUp/PgDn or Ctrl+U/Ctrl+D] Scroll",
"[q] Back",
],
Some("Complete a-z to unlock branches"),
Some(locked_branch_notice(self.skill_tree)),
)
} else if bp.status == BranchStatus::Available {
(
@@ -160,6 +193,7 @@ impl Widget for SkillTreeWidget<'_> {
};
let hint_lines = pack_hint_lines(&footer_hints, inner.width as usize);
let notice_lines = footer_notice
.as_deref()
.map(|text| wrapped_line_count(text, inner.width as usize))
.unwrap_or(0);
let show_notice = footer_notice.is_some()
@@ -273,7 +307,7 @@ impl SkillTreeWidget<'_> {
let bp = self.skill_tree.branch_progress(branch_id);
let def = get_branch_definition(branch_id);
let total_keys = def.levels.iter().map(|l| l.keys.len()).sum::<usize>();
let total_keys = self.skill_tree.branch_total_keys_for(branch_id);
let confident_keys = self
.skill_tree
.branch_confident_keys(branch_id, self.key_stats);
@@ -346,7 +380,10 @@ impl SkillTreeWidget<'_> {
lines.push(Line::from(""));
}
lines.push(Line::from(Span::styled(
" \u{2500}\u{2500} Branches (available after a-z) \u{2500}\u{2500}",
format!(
" \u{2500}\u{2500} Branches (available after {} primary letters) \u{2500}\u{2500}",
self.skill_tree.primary_letters().len()
),
Style::default().fg(colors.text_pending()),
)));
// If inter-branch spacing is enabled, the next branch will already
@@ -377,15 +414,15 @@ impl SkillTreeWidget<'_> {
let branch_id = branches[self.selected];
let bp = self.skill_tree.branch_progress(branch_id);
let def = get_branch_definition(branch_id);
let expanded_level_spacing =
allow_expanded_level_spacing && use_expanded_level_spacing(area.height, branch_id);
let expanded_level_spacing = allow_expanded_level_spacing
&& use_expanded_level_spacing_for_tree(self.skill_tree, area.height, branch_id);
let mut lines: Vec<Line> = Vec::new();
// Branch title with level info
let level_text = if branch_id == BranchId::Lowercase {
let unlocked = self.skill_tree.branch_unlocked_count(BranchId::Lowercase);
let total = SkillTreeEngine::branch_total_keys(BranchId::Lowercase);
let total = self.skill_tree.branch_total_keys_for(BranchId::Lowercase);
format!("Unlocked {unlocked}/{total} letters")
} else {
match bp.status {
@@ -441,7 +478,12 @@ impl SkillTreeWidget<'_> {
)));
// Per-key mastery bars
for &key in level.keys {
let level_keys: Vec<char> = if branch_id == BranchId::Lowercase {
self.skill_tree.primary_letters().to_vec()
} else {
level.keys.to_vec()
};
for &key in &level_keys {
let is_focused = focused == Some(key);
let confidence = self.key_stats.get_confidence(key).min(1.0);
let is_confident = confidence >= 1.0;

View File

@@ -743,7 +743,7 @@ impl StatsDashboard<'_> {
};
let show_shifted = inner.height >= 10; // 4 base + 4 shifted + 1 mod row + 1 spare
let all_rows = &self.keyboard_model.rows;
let offsets: &[u16] = &[0, 2, 3, 4];
let offsets = self.keyboard_model.geometry_hints.row_offsets;
let kbd_width = all_rows
.iter()
.enumerate()
@@ -898,7 +898,7 @@ impl StatsDashboard<'_> {
};
let show_shifted = inner.height >= 10; // 4 base + 4 shifted + 1 mod row + 1 spare
let all_rows = &self.keyboard_model.rows;
let offsets: &[u16] = &[0, 2, 3, 4];
let offsets = self.keyboard_model.geometry_hints.row_offsets;
let kbd_width = all_rows
.iter()
.enumerate()