An extensible, real-world-style solution

• This problem consists of several subproblems, and solving it the right way requires good design skills as well. The following solution may look long — I agree — but there are several advantages to composing the code this way.

• It is reusable. The tokenizer can be used in other products/projects.
• It is testable. Each component can be tested separately, and with dependency injection, mocking is possible.
• It is extensible. Suppose you need to support floating-point numbers or internationalization: all you need to change is the tokenizer. Likewise, if you want to support parentheses, just update the ShuntingYard step.

From an algorithmic perspective, the following code uses the Shunting Yard algorithm, which converts the given mathematical expression into a precedence-agnostic Reverse Polish Notation form. Shunting Yard can handle almost all operators, including both left-associative and right-associative ones.

Here is the sample solution.

```cpp
class Solution {
// A lexical token: either a single-character operator (isOp == true)
// or a numeric operand stored in val (isOp == false).
// Uses C++11 in-class member initializers instead of an explicit
// default constructor (Rule of Zero).
struct token {
    char op = ' ';       // operator character, meaningful only when isOp is true
    double val = 0;      // operand value, meaningful only when isOp is false
    bool isOp = false;   // discriminates operator vs. operand
};

// Splits an arithmetic expression string into operator and number tokens.
// Whitespace between tokens is skipped by hasNext(); getNext() assumes the
// cursor already sits on a token.
class tokenizer {
    const string &s;               // the expression being scanned (not owned)
    int curr;                      // cursor position into s
    unordered_set<char> operators; // single-char operators to recognize

    // Advance the cursor past any run of spaces.
    void skipWhitespace() {
        while(curr < (int)s.size() && s[curr] == ' ') {
            curr++;
        }
    }

public:
    // Takes the operator set by const reference to avoid an extra copy
    // (the original passed it by value and then copied it again).
    tokenizer(const string &str, const unordered_set<char> &op): s(str), curr(0), operators(op) {}

    // Returns true when another token is available. Also skips leading
    // whitespace as a side effect, establishing getNext()'s precondition.
    bool hasNext() {
        skipWhitespace();
        return curr < (int)s.size();
    }

    // Precondition: hasNext() returned true.
    // Produces either an operator token or a non-negative integer token.
    token getNext() {
        token res;
        if(operators.count(s[curr])) {
            res.op = s[curr++];
            res.isOp = true;
        } else {
            int num = 0;
            // Explicit bounds check: do not rely on the implicit '\0'
            // returned by s[s.size()] to stop the loop.
            while(curr < (int)s.size() && s[curr] >= '0' && s[curr] <= '9') {
                num = num*10 + (s[curr]-'0');
                curr++;
            }
            res.val = num;
            res.isOp = false;
        }

        return res;
    }
};

vector<token> shuntingYard(const vector<token> &tokens) {
vector<token> res;
stack<token> s;

for(auto token: tokens) {
if(!token.isOp) {
res.push_back(token);
} else {
while(!s.empty() && precedence[s.top().op] <= precedence[token.op]) {
res.push_back(s.top());
s.pop();
}
s.push(token);
}
}

while(!s.empty()) {
res.push_back(s.top());
s.pop();
}

return res;
}

int evalRpn(const vector<token> &tokens) {
stack<token> s;

for(auto token: tokens) {
if(token.isOp) {
auto rhs = s.top();
s.pop();
auto lhs = s.top();
s.pop();
s.push(eval(lhs, rhs, token));
} else {
s.push(token);
}
}

return s.top().val;
}

// Applies the binary operator carried by `op` to lhs and rhs and returns the
// result as an operand token. Results of * and / are truncated to int
// (static_cast truncates toward zero, matching integer division semantics).
token eval(const token &lhs, const token &rhs, const token &op) {
    token res;
    switch(op.op) {
        case '+':
            res.val = lhs.val + rhs.val;
            break;
        case '-':
            res.val = lhs.val - rhs.val;
            break;
        case '*':
            res.val = static_cast<int>(lhs.val * rhs.val);
            break;
        case '/':
            res.val = static_cast<int>(lhs.val / rhs.val);
            break;
        // Any other character falls through and yields a zero-valued operand;
        // the tokenizer only emits operators from the configured set, so this
        // should be unreachable here.
    }
    return res;
}

// Smaller value = higher precedence: * and / bind tighter than + and -.
unordered_map<char, int> precedence = {{'*', 1}, {'/', 1}, {'+', 2}, {'-', 2}};

public:
int calculate(string s) {
tokenizer t(s, {'+', '-', '/', '*'});
vector<token> tokens;

while(t.hasNext()) {
tokens.push_back(t.getNext());
}

auto rpn = shuntingYard(tokens);
return evalRpn(rpn);
}
};
``````

Looks like your connection to LeetCode Discuss was lost, please wait while we try to reconnect.