Add physical constants and negation of functions/parentheses

This commit is contained in:
2024-04-17 01:55:45 +02:00
parent 6266b49fd5
commit 673d94e568
+45 -18
View File
@@ -1,5 +1,11 @@
use std::f64::consts;
use std::{io, io::Write};
// Physical constants used by `parse_buffer` ("h", "u", "a", "c").
// Values follow the 2019 SI redefinition / CODATA recommended values.
/// Planck constant `h`, in J·s (exact by SI definition).
const PLANCK_CONSTANT: f64 = 6.62607015e-34f64;
/// Atomic mass constant `u` (1/12 the mass of carbon-12), in kg.
const ATOMIC_MASS_CONSTANT: f64 = 1.66053906660e-27f64;
/// Wien displacement law constant `b` (λ_peak = b / T), in m·K.
const WIEN_WAVELENGTH_DISPLACEMENT_LAW_CONSTANT: f64 = 2.897771955e-3f64;
/// Speed of light in vacuum `c`, in m/s (exact by SI definition).
const SPEED_OF_LIGHT_IN_VACUUM: f64 = 299792458f64;
#[derive(Debug, PartialEq)]
enum TokenizeError {
NumberParseError(String),
@@ -60,6 +66,11 @@ fn main() {
.read_line(&mut input)
.expect("Failed to read line");
if input == "clear\n" {
print!("\u{001b}c");
continue;
}
let result = compute(&input);
print_result(result);
}
@@ -88,19 +99,13 @@ impl Token {
/// Print a computed value to stdout.
///
/// Very large or very small (but non-zero) magnitudes are printed in
/// upper-case scientific notation (`{:E}`, e.g. `1.5E10`); everything
/// else is printed as a plain decimal.
fn print_result(result: f64) {
    // `&&` binds tighter than `||`, so this reads:
    //   |result| > 10_000  OR  (0 < |result| < 0.0001).
    // Using `abs()` so large *negative* results also get scientific
    // notation (the old `result > 10_000.` missed them). The duplicated
    // `{:e}` println left over from the diff is removed — print once.
    if result.abs() > 10_000. || (result < 0.0001 && result > -0.0001) && result != 0. {
        println!("{:E}", result);
    } else {
        println!("{result}");
    }
}
fn compute(input: &str) -> f64 {
let input = &input
.trim()
.replace(' ', "")
.replace("pi", "π")
.replace("tau", "τ");
let tokens = match tokenize(input) {
Ok(v) => v,
Err(e) => {
@@ -173,7 +178,7 @@ fn tokenize(input: &str) -> Result<Vec<Token>, TokenizeError> {
'/' | '÷' => Token::Divide,
'+' => Token::Add,
'-' | '' => {
// exception for negative exponent in scientific notation
if let Some(p) = prev {
if p == 'E' {
num_buf.push(c);
@@ -185,15 +190,15 @@ fn tokenize(input: &str) -> Result<Vec<Token>, TokenizeError> {
'^' => Token::Power,
'%' => Token::Modulus,
',' => Token::Separator,
'π' => Token::Number(std::f64::consts::PI),
'e' => Token::Number(std::f64::consts::E),
'τ' => Token::Number(std::f64::consts::TAU),
'π' => Token::Number(consts::PI),
'τ' => Token::Number(consts::TAU),
'0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | 'E' | '.' => {
push_buf(&mut tokens, &mut fun_buf)?;
num_buf.push(c);
prev = Some(c);
continue;
}
' ' | '\n' | '\t' | '\r' => continue,
_ => {
push_buf(&mut tokens, &mut num_buf)?;
fun_buf.push(c);
@@ -221,6 +226,7 @@ fn push_buf(tokens: &mut Vec<Token>, buf: &mut String) -> Result<(), TokenizeErr
}
fn parse_buffer(buf: &str) -> Result<Token, TokenizeError> {
// This does not handle implicit multiplication with constants and functions.
match buf {
"sin" => Ok(Token::Function(FunctionType::Sine)),
"cos" => Ok(Token::Function(FunctionType::Cosine)),
@@ -229,6 +235,13 @@ fn parse_buffer(buf: &str) -> Result<Token, TokenizeError> {
"max" => Ok(Token::Function(FunctionType::Max)),
"min" => Ok(Token::Function(FunctionType::Min)),
"sqrt" => Ok(Token::Function(FunctionType::SquareRoot)),
"e" => Ok(Token::Number(consts::E)),
"pi" => Ok(Token::Number(consts::PI)),
"tau" => Ok(Token::Number(consts::TAU)),
"h" => Ok(Token::Number(PLANCK_CONSTANT)),
"a" => Ok(Token::Number(WIEN_WAVELENGTH_DISPLACEMENT_LAW_CONSTANT)),
"u" => Ok(Token::Number(ATOMIC_MASS_CONSTANT)),
"c" => Ok(Token::Number(SPEED_OF_LIGHT_IN_VACUUM)),
_ => {
if let Ok(number) = buf.parse() {
Ok(Token::Number(number))
@@ -285,12 +298,15 @@ fn implicit_operations(tokens: Vec<Token>) -> Option<Vec<Token>> {
new_tokens.pop();
new_tokens.pop();
new_tokens.push(Token::Number(-n));
} else if new_tokens[1] == Token::LeftParenthesis {
new_tokens.pop();
new_tokens.pop();
} else if new_tokens[1] == Token::LeftParenthesis
|| matches!(new_tokens[1], Token::Function(_))
{
let a = new_tokens[1].clone();
new_tokens.pop()?;
new_tokens.pop()?;
new_tokens.push(Token::Number(-1.));
new_tokens.push(Token::Multiply);
new_tokens.push(Token::LeftParenthesis);
new_tokens.push(a);
}
}
@@ -477,8 +493,9 @@ fn calculate(tokens: Vec<Token>) -> Result<f64, CalculateError> {
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn token_test() {
fn tokens() {
assert_eq!(tokenize("("), Ok(vec![Token::LeftParenthesis]));
assert_eq!(tokenize(")"), Ok(vec![Token::RightParenthesis]));
assert_eq!(tokenize("*"), Ok(vec![Token::Multiply]));
@@ -489,6 +506,14 @@ mod tests {
assert_eq!(tokenize("3"), Ok(vec![Token::Number(3.)]));
assert_eq!(tokenize("e"), Ok(vec![Token::Number(std::f64::consts::E)]));
assert_eq!(tokenize("π"), Ok(vec![Token::Number(std::f64::consts::PI)]));
assert_eq!(
tokenize("pi"),
Ok(vec![Token::Number(std::f64::consts::PI)])
);
assert_eq!(
tokenize("tau"),
Ok(vec![Token::Number(std::f64::consts::TAU)])
);
assert_eq!(
tokenize("max"),
Ok(vec![Token::Function(FunctionType::Max)])
@@ -507,7 +532,7 @@ mod tests {
);
assert_eq!(
tokenize("ln"),
Ok(vec![Token::Function(FunctionType::Log10)])
Ok(vec![Token::Function(FunctionType::NaturalLog)])
);
assert_eq!(
tokenize("log"),
@@ -554,8 +579,9 @@ mod tests {
])
);
}
#[test]
fn implicit_test() {
fn implicit() {
// (3)2 == (3)*2
assert_eq!(
implicit_operations(vec![
@@ -686,6 +712,7 @@ mod tests {
])
);
}
#[test]
fn infix_to_postfix_test() {
assert_eq!(