// LeetCode 554: a vertical line through the wall crosses a brick unless it
// passes through an edge between two bricks. For each interior edge position
// (prefix sum of brick widths in a row), count how many rows have an edge
// there; the best line crosses (number of rows - most common edge count).
public int LeastBricks(IList<IList<int>> wall) {
    // edge position -> number of rows with a brick boundary at that position
    Dictionary<int, int> edgeCounts = new Dictionary<int, int>();
    int maxEdges = 0;
    foreach (IList<int> row in wall)
    {
        int sum = 0;
        // Stop one brick early: the wall's right outer edge never counts.
        for (int c = 0; c < row.Count - 1; c++)
        {
            sum += row[c];
            int cnt;
            edgeCounts.TryGetValue(sum, out cnt); // cnt = 0 when absent
            cnt++;
            edgeCounts[sum] = cnt;
            if (cnt > maxEdges) maxEdges = cnt;
        }
    }
    // Every row that does not share the most common edge is crossed once.
    return wall.Count - maxEdges;
}
Jesse D.
@jdrogin
Software Engineer Seattle, Washington
Posts made by jdrogin

C# - hash map solution

C# - 1-liner
public string ReverseWords(string s) { return string.Join(" ", s.Split().Select(w => new string(w.ToCharArray().Reverse().ToArray()))); }

RE: Simple Java Solution
@StefanPochmann good catch, I have no proof; in fact I don't even know why I was thinking that in the first place. I suppose it's because I missed that check when I solved the problem and it passed the test cases, so I didn't really think about what it was for. HOWEVER, it does turn out to be true, at least for the input range of this problem:
Note: The input number n will not exceed 100,000,000. (1e8)
if (i != num / i) sum += num / i;
this case is guarding against the square root being added twice, which will only occur for inputs that are perfect squares (4, 9, 16, 25, 36, 49, etc.). I wrote a script to check whether any squares were affected by my "mistake" code, either by turning a true into a false or vice versa, and none were. Does that count as a proof? LOL!

C# - simple, no square-root calc
public bool CheckPerfectNumber(int num) { if (num < 2) return false; int sum = 1; for (int x = 2; x * x <= num; x++) { if (num % x == 0) { sum += x; sum += num / x; } } return sum == num; }

RE: Simple Java Solution
same concept but you can avoid the Sqrt(num) calculation by checking square instead. Also, that inner if statement can be avoided.
public bool CheckPerfectNumber(int num) { if (num < 2) return false; int sum = 1; for (int x = 2; x * x <= num; x++) { if (num % x == 0) { sum += x; sum += num / x; } } return sum == num; }

RE: C# - O(n) time and O(n) space - simple 2 pointer
@StefanPochmann thanks for your analysis, this is very interesting, please help me understand this further. The way I was looking at it is the "right" pointer continuously advances from the end to the beginning and thus "n" operations. But you correctly point out that each operation is combining strings that double in length for each "round".
So, my question is does it cost more to combine longer strings? At first thought, surely it costs more, but I believe this will depend on the implementation. If the strings are linked lists it would be constant time to combine them regardless of the length. Here I am not using linked lists, rather just default string concatenation. Thanks for your explanation!

C# - DP with HashSet, O(n^2) time and O(n^2) memory
public bool CanCross(int[] stones) { HashSet<int>[] steps = new HashSet<int>[stones.Length]; steps[0] = new HashSet<int>(); steps[0].Add(0); Array.Sort(stones); for (int i = 1; i < stones.Length; i++) { steps[i] = new HashSet<int>(); for (int j = 0; j < i; j++) { int diff = stones[i]  stones[j]; if (steps[j].Contains(diff)  steps[j].Contains(diff + 1)  steps[j].Contains(diff  1)) { steps[i].Add(diff); } } } return steps[steps.Length  1].Count > 0; }

RE: Verbose Java O(m*n) Solution, HashMap
@shawngao great solution, I've got the same here but I've optimized on the "row key" by converting the pixel string to an array of longs then to a string by concatenating the longs which should yield a much more compact row key.
public int FindBlackPixel(char[,] picture, int N) { int n = picture.GetLength(0); int m = picture.GetLength(1); int[] rowCnts = new int[n]; int[] colCnts = new int[m]; string[] rowKeys = new string[n]; Dictionary<string, int> rowKeyMap = new Dictionary<string,int>(); for (int i = 0; i < n; i++) { long[] keyArr = new long[(m+63)/64]; for (int j = 0; j < m; j++) { if (picture[i,j] == 'B') { rowCnts[i]++; colCnts[j]++; keyArr[j/64] = (long)1 << (j % 64); } } string key = ""; foreach (long x in keyArr) key+= x.ToString() + ","; rowKeys[i] = key; if (!rowKeyMap.ContainsKey(key)) rowKeyMap[key] = 0; rowKeyMap[key]++; } int lonelyCnt = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { if (picture[i,j] == 'B' && rowCnts[i] == N && colCnts[j] == N && rowKeyMap[rowKeys[i]] == N) { lonelyCnt++; } } } return lonelyCnt; }

RE: [Java] [C++] Clean Code with Explanation  map<int, set<int>>
This is a nice clean solution however I believe you have some inefficiency here in this block (Java code)
for (int row : cols.get(j)) { if (!rows.get(i).equals(rows.get(row))) { lonely = 0; break; } }
note that Set.equals() is an O(size of set) operation.
https://docs.oracle.com/javase/7/docs/api/java/util/AbstractSet.html#equals(java.lang.Object) — you are comparing each row to every candidate row by checking potentially all cells in the row (if the row contained all 'B', for example). If, for example, rows 1, 2, 3, 4 were all equal and each had 10 B's: on row 1, B 1 you would check for row equality against rows 1, 2, 3, 4. Then for row 1, B 2 you'd do the same, and again for each B in the row. Then again for rows 2, 3, 4, each time checking against rows 1, 2, 3, 4. This repeats comparisons that you have already done.
This is where the "row key" approach has the advantage. Compute a "row key" which uniquely identifies the row and you need only keep a count of the frequency of each row key. From there you can massively reduce the number of redundant checking needed. Here is my row key solution.
public int FindBlackPixel(char[,] picture, int N) { int n = picture.GetLength(0); int m = picture.GetLength(1); int[] rowCnts = new int[n]; int[] colCnts = new int[m]; string[] rowKeys = new string[n]; Dictionary<string, int> rowKeyMap = new Dictionary<string,int>(); for (int i = 0; i < n; i++) { long[] keyArr = new long[(m+63)/64]; for (int j = 0; j < m; j++) { if (picture[i,j] == 'B') { rowCnts[i]++; colCnts[j]++; keyArr[j/64] = (long)1 << (j % 64); } } string key = ""; foreach (long x in keyArr) key+= x.ToString() + ","; rowKeys[i] = key; if (!rowKeyMap.ContainsKey(key)) rowKeyMap[key] = 0; rowKeyMap[key]++; } int lonelyCnt = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { if (picture[i,j] == 'B' && rowCnts[i] == N && colCnts[j] == N && rowKeyMap[rowKeys[i]] == N) { lonelyCnt++; } } } return lonelyCnt; }

C# - row keys using bits-to-long array for efficient hashing
The solution to this problem is to precount the 'B' pixels in each column and row, but also to somehow compare rows. What comes to mind is that we need a key to represent each row; as long as we know that there are x rows with the same key, we can solve this problem by checking that each 'B' has a row count, column count, and row-key count all equal to N. The difficulty really comes down to how to efficiently form the row keys. We could simply use a string and build the string out of all the characters in a row. But we can do much better if we look at each pixel as a zero or one, then realize this can be "serialized" as an integer (or long, in this case) array. Then we can finally form the row key from the array of longs, which will be much more compact than just serializing each pixel into our string key.
public int FindBlackPixel(char[,] picture, int N) { int n = picture.GetLength(0); int m = picture.GetLength(1); int[] rowCnts = new int[n]; int[] colCnts = new int[m]; string[] rowKeys = new string[n]; Dictionary<string, int> rowKeyMap = new Dictionary<string,int>(); for (int i = 0; i < n; i++) { long[] keyArr = new long[(m+63)/64]; for (int j = 0; j < m; j++) { if (picture[i,j] == 'B') { rowCnts[i]++; colCnts[j]++; keyArr[j/64] = (long)1 << (j % 64); } } string key = ""; foreach (long x in keyArr) key+= x.ToString() + ","; rowKeys[i] = key; if (!rowKeyMap.ContainsKey(key)) rowKeyMap[key] = 0; rowKeyMap[key]++; } int lonelyCnt = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { if (picture[i,j] == 'B' && rowCnts[i] == N && colCnts[j] == N && rowKeyMap[rowKeys[i]] == N) { lonelyCnt++; } } } return lonelyCnt; }