diff --git a/src/elements/list.rs b/src/elements/list.rs index 8390d83..17781c6 100644 --- a/src/elements/list.rs +++ b/src/elements/list.rs @@ -23,6 +23,7 @@ use crate::parser::source::Cursor; use crate::parser::source::Token; use crate::parser::source::VirtualSource; use crate::parser::util; +use lsp::hints::Hints; use parser::util::escape_source; use regex::Regex; @@ -217,7 +218,7 @@ impl ListRule { } // New depth else if idx + 1 == v.len() { - Some(prev_idx + 1) + Some(*prev_idx + 1) } // Increase from previous else { @@ -395,6 +396,20 @@ impl Rule for ListRule { ListRule::push_markers(&token, state, document, &vec![], &depth); } + if let Some(hints) = + Hints::from_source(token.source(), &state.shared.lsp) + { + let mut label = String::new(); + for (_, id) in &depth + { + if !label.is_empty() { + label.push('.'); + } + label.push_str(id.to_string().as_str()); + } + hints.add(captures.get(1).unwrap().end(), label); + } + state.push( document, Box::new(ListEntry { diff --git a/src/elements/style.rs b/src/elements/style.rs index 05b06e0..da21a5f 100644 --- a/src/elements/style.rs +++ b/src/elements/style.rs @@ -108,7 +108,7 @@ impl RuleState for StyleState { .map(|last| { ( last.location().source(), - last.location().end() - 1..last.location().end(), + last.location().end_offset(1)..last.location().end(), ) }) .unwrap(); diff --git a/src/parser/source.rs b/src/parser/source.rs index 328a185..8299576 100644 --- a/src/parser/source.rs +++ b/src/parser/source.rs @@ -5,6 +5,7 @@ use std::rc::Rc; use downcast_rs::impl_downcast; use downcast_rs::Downcast; +use unicode_segmentation::UnicodeSegmentation; /// Trait for source content pub trait Source: Downcast + Debug { @@ -314,13 +315,68 @@ pub struct Token { } impl Token { + /// Creates a new token from a range and a source pub fn new(range: Range, source: Rc) -> Self { Self { range, source } } + /// Retrieve the source of the token pub fn source(&self) -> Rc { self.source.clone() } + /// Get the start byte position of 
the token pub fn start(&self) -> usize { self.range.start } + /// Get the end byte position of the token pub fn end(&self) -> usize { self.range.end } + + /// Get a byte position from a grapheme offset + /// + /// When in need of diagnostics over a range, use this method instead of adding bytes to `start()` + /// In case the offset is out of range, the value of `end()` is returned instead. + /// + /// # Example + /// + /// Say you have the following range: + /// `πŸš½β€πŸ‘¨TEXT` (πŸš½β€πŸ‘¨ = 3 + TEXT = 4 codepoints) + /// Calling [`start_offset(1)`] over this range would give you the byte position of character `T` + pub fn start_offset(&self, offset: usize) -> usize { + if offset == 0 { + return self.start(); + } + + let mut graphemes = self.source.content()[self.range.start..self.range.end] + .grapheme_indices(true) + .skip(offset); + + return graphemes + .next() + .map(|(pos, _)| self.range.start + pos) + .unwrap_or(self.range.end); + } + + /// Get a byte position from a grapheme offset + /// + /// When in need of diagnostics over a range, use this method instead of subtracting bytes from `end()` + /// In case the offset is out of range, the value of `end()` is returned instead. + /// + /// # Example + /// + /// Say you have the following range: + /// `TEXTπŸŽ…β€πŸ¦½` (TEXT = 4 + πŸŽ…β€πŸ¦½ = 3 codepoints) + /// Calling [`end_offset(1)`] over this range would give you the byte position of character `πŸŽ…β€πŸ¦½` + pub fn end_offset(&self, offset: usize) -> usize { + if offset == 0 { + return self.end(); + } + + let mut graphemes = self.source.content()[0..self.range.end] + .grapheme_indices(true) + .rev() + .skip(offset - 1); + + return graphemes + .next() + .map(|(pos, _)| pos) + .unwrap_or(self.range.end); + } } impl From> for Token {