Evolving from brute force to DP

1. O(2^n) DFS
``````    bool canPartition(vector<int>& nums) {
// Entry point: the array can be split into two equal-sum halves iff the
// total is even and some subset sums to exactly half of it.
int sum = accumulate(nums.begin(),nums.end(),0);
if(sum&1) return 0;  // odd total can never be split evenly
sum>>=1;  // target = half of the total
return dfs(0,sum,nums);
}
// Brute-force search: starting at index p, can some subset of the remaining
// numbers sum exactly to `sum`? Each element is either skipped or taken,
// giving the O(2^n) decision tree.
bool dfs(int p, int sum, vector<int>& nums) {
    // Exact target reached: success.
    if (sum == 0) return true;
    // Overshot the target, or no numbers left to choose from.
    if (sum < 0) return false;
    if (p == (int)nums.size()) return false;
    // Branch 1: leave nums[p] out of the subset.
    if (dfs(p + 1, sum, nums)) return true;
    // Branch 2: put nums[p] into the subset.
    return dfs(p + 1, sum - nums[p], nums);
}
``````
1. O(ns) DFS with memoization. If there were no constraints on the array size or the magnitude of the numbers, I would say this is the best overall solution.
``````    bool canPartition(vector<int>& nums) {
// Same entry point as the brute-force version, but dfs() now memoizes
// (position, remaining-sum) results, bounding the work by O(n*s).
int sum = accumulate(nums.begin(),nums.end(),0);
if(sum&1) return 0;  // odd total: no equal split possible
// mem[p] maps a remaining sum at position p to the cached answer.
vector<unordered_map<int,bool>> mem(nums.size());
return dfs(0,sum>>1,nums,mem);
}
// Memoized search: identical decision tree to the brute-force dfs, but each
// (p, sum) state is solved at most once. mem[p] caches answers keyed by the
// remaining sum, giving O(n*s) states overall.
bool dfs(int p, int sum, vector<int>& nums, vector<unordered_map<int,bool>>& mem) {
    // Exact target reached.
    if (sum == 0) return true;
    // Overshot, or all numbers consumed.
    if (sum < 0 || p == (int)nums.size()) return false;
    // Return a previously computed answer for this state, if any.
    auto cached = mem[p].find(sum);
    if (cached != mem[p].end()) return cached->second;
    // Skip nums[p], or take it; record the result before returning.
    bool reachable = dfs(p + 1, sum, nums, mem) || dfs(p + 1, sum - nums[p], nums, mem);
    return mem[p][sum] = reachable;
}
``````
1. O(ns) time, O(ns) space DP/BFS. Starting with 0, we add the next number to each sum in the current level to generate the next level's sums. The problem only asks whether at least one solution exists; for this kind of problem, BFS may waste time exploring every sum, while DFS, which commits to one candidate solution at a time, could be faster. This is similar to Word Break.
``````    bool canPartition(vector<int>& nums) {
// Level-by-level BFS over reachable subset sums: dp[i] holds every sum
// attainable using some subset of the first i numbers.
int sum = accumulate(nums.begin(),nums.end(),0), n = nums.size();
if(sum&1) return 0;  // odd total cannot be halved
vector<unordered_set<int>> dp(n+1);
dp[0].insert(0);  // the empty subset sums to 0
for(int i=0;i<n;i++)
for(int s:dp[i]) {
if(s==sum/2) return 1;  // target hit using only the first i numbers
if(s>sum/2) continue;  // overshoot: pointless to expand further
dp[i+1].insert(s);  // skip nums[i]
dp[i+1].insert(s+nums[i]);  // take nums[i]
}
// dp[n] is never scanned, but that is safe: if a valid split exists, the
// complement of any target subset also sums to sum/2 and excludes
// nums[n-1], so it already appears in dp[n-1] and is caught above.
return 0;
}
``````
1. O(ns) time, O(s) space dp. We only need to cache the current level and the next level.
``````    bool canPartition(vector<int>& nums) {
// Space-optimized variant of the BFS above: nxt accumulates every sum
// reachable so far, and cur snapshots it once per number.
int sum = accumulate(nums.begin(),nums.end(),0);
if(sum&1) return 0;  // odd total: impossible
unordered_set<int> cur, nxt({0});  // 0 = sum of the empty subset
for(int n:nums) {
cur = nxt;  // snapshot, so we never insert into the set being iterated
for(int s:cur) {
if(s==sum/2) return 1;  // some subset reaches half the total
if(s+n<=sum/2) nxt.insert(s+n);  // extend by n, pruning overshoots
}
}
return 0;
}
``````
1. O(ns) DP. Because the input data is small, we can use a bit vector to represent all reachable sums, which avoids hash-table overhead. The DP/BFS approach only ever expands the current set of sums, so by iterating the sums in decreasing order we can update a single level in place.
``````    bool canPartition(vector<int>& nums) {
// Classic 0/1-knapsack bit-vector DP: dp[i] is true iff some subset of the
// numbers seen so far sums exactly to i.
int sum = accumulate(nums.begin(),nums.end(),0);
if(sum&1) return 0;  // odd total: impossible
vector<bool> dp(1+(sum>>=1));  // note: sum is halved here, becoming the target
dp[0]=1;  // the empty subset
for(int n:nums) {
// Walk i downward so the bits set in this pass (at i+n > i) are never
// revisited, i.e. each number is used at most once per sum.
for(int i=sum-1;i>=0;i--) {
if(!dp[i]) continue;
if(i+n<=sum) dp[i+n]=1;
}
if(dp[sum]) return 1;  // early exit once the target is reachable
}
return 0;
}
``````

What if the input numbers are arbitrary? Then the problem becomes similar to Target Sum. Assuming no overflow, we would need to generate all possible sums, so the fixed-size bit vector no longer applies; I think #2 should do better than #5 in that case.

Looks like your connection to LeetCode Discuss was lost, please wait while we try to reconnect.