wip lexer

This commit is contained in:
hugogogo
2026-04-28 22:04:23 +02:00
parent a2333a3ff1
commit 48221894c0
5 changed files with 165 additions and 9 deletions

View File

@@ -91,7 +91,7 @@ $(NAME): $(OBJS)
run: $(NAME)
@echo $(YELLOW)"run"$(RESET)
@./$(NAME)
@./$(NAME) "3 * x^2 + 1 * x^1 - 2 * x^0"
clean:
$(RM_OBJS)

View File

@@ -12,3 +12,31 @@ this project uses submodules (maybe recursively), so either :
- `git clone --recurse-submodules <repo-url>`
- or, after cloning : `git submodule update --init --recursive`
## steps
1. lexer
-> tokens[] :
{
PLUS -> +
MINUS -> -
VARIABLE -> x
NUMBER -> int or double
POWER -> ^
MULTIPLICATION -> *
DIVISION -> /
END -> null
}[]
2. parser
-> terms[] :
{
SIGN -> + or -
COEFFICIENT -> double
EXPONENT -> double
}[]
3. reduce
4. print reduced form
5. find degree
6. print degree
7. solve
8. print solution

View File

@@ -1,6 +1,37 @@
#ifndef COMPUTORV1_H
#define COMPUTORV1_H

#include "../libft/includes/libft.h"

/* Error identifiers; callers pass the negated value to stop_errors(). */
typedef enum
{
	ERROR_BASIC,         // 0: generic failure
	ERROR_UNKNOWN_TOKEN, // 1: lexer met an unrecognized character
} program_error;

/* Lexical token categories produced by lexerize(). */
typedef enum
{
	TOKEN_PLUS,           // +
	TOKEN_MINUS,          // -
	TOKEN_VARIABLE,       // x, y, etc.
	TOKEN_NUMBER,         // int or double
	TOKEN_POWER,          // ^ or **
	TOKEN_MULTIPLICATION, // *
	TOKEN_DIVISION,       // /
	TOKEN_END             // null (end of input)
} TokenType;

/* One lexed token: its category plus its payload (anonymous union, C11). */
typedef struct
{
	TokenType type;
	union
	{
		double num_value; // For TOKEN_NUMBER
		char var_value;   // For TOKEN_VARIABLE and single-char operators
	};
} t_token;

#define MAX_TOKENS 100

/* NOTE(review): defining this array in a header creates one copy per
** translation unit that includes it (multiple-definition link errors
** without the common-symbol extension). Once more than one .c includes
** this header, declare it `extern` here and define it in exactly one .c. */
t_token tokens[MAX_TOKENS];

#endif

2
libft

Submodule libft updated: 603303a21b...2be81d5630

View File

@@ -1,15 +1,112 @@
#include "computorv1.h"
/*
** Report a diagnostic for the given (negated program_error) code on
** stderr, then terminate the whole program with that code as exit status.
** Never returns; the int return type only exists to match call sites.
*/
int stop_errors(int err)
{
	if (err == -ERROR_UNKNOWN_TOKEN)
		ft_putstr_fd("error: unknown token\n", STDERR_FILENO);
	else
		ft_putstr_fd("unknown error\n", STDERR_FILENO);
	exit(err);
}
/*
** Starting at input_pos, step over consecutive whitespace in input and
** return the index of the first non-whitespace character (which may be
** the terminating NUL).
*/
int skip_whitespace(const char *input, int input_pos)
{
	int pos;

	pos = input_pos;
	while (ft_isspace(input[pos]))
		pos++;
	return (pos);
}
/*
** Return the consumed length (1) when the character at input_pos is the
** '+' operator, 0 otherwise.
*/
int token_is_plus(const char *input, int input_pos)
{
	if (input[input_pos] == '+')
		return (1);
	return (0);
}
int lexerize(const char *input)
{
int token_count = 0;
int input_pos = 0;
int token_size = 0;
while (input[input_pos])
{
input_pos = skip_whitespace(input, input_pos);
if (input[input_pos] == '\0')
{
break;
}
token_size = token_is_plus(input, input_pos);
if (token_size)
{
tokens[token_count].type = TOKEN_PLUS;
tokens[token_count].var_value = '+';
}
if (token_size == 0)
{
stop_errors(-ERROR_UNKNOWN_TOKEN);
}
token_count++;
input_pos += token_size;
}
// Add end token
tokens[token_count].type = TOKEN_END;
tokens[token_count].var_value = '\0';
return 1;
}
/*
** Entry point: expects the polynomial expression as av[1]
** (e.g. "3 * x^2 + 1 * x^1 - 2 * x^0").
** Echoes the command-line arguments, lexes av[1] into the global
** tokens[] array, then dumps the token array for debugging.
*/
int main(int ac, char **av)
{
	int i;
	int ret;

	if (ac < 2)
	{
		return (0);
	}
	/* debug: echo the arguments once (the original had two nested
	** `while (i < ac)` loops printing them redundantly) */
	i = 0;
	while (i < ac)
	{
		ft_putnbr(i);
		ft_putstr(" : ");
		ft_putstr(av[i]);
		ft_putchar('\n');
		i++;
	}
	ret = lexerize(av[1]);
	if (ret <= 0)
	{
		stop_errors(ret);
	}
	/* tmp debug output */
	i = 0;
	while (tokens[i].type != TOKEN_END)
	{
		ft_printf("token %i :\n type : %i\n value : ", i, tokens[i].type);
		if (tokens[i].type == TOKEN_NUMBER)
		{
			/* was ft_printf("%d\n", i, tokens[i].num_value): %d on a
			** double plus a stray argument is undefined behavior.
			** NOTE(review): confirm libft's ft_printf supports %f. */
			ft_printf("%f\n", tokens[i].num_value);
		}
		else
		{
			ft_printf("%c\n", tokens[i].var_value);
		}
		i++;
	}
	return (0);
}