From f32b5feeaaa51731cb6e53e267ff4a6198fc22e6 Mon Sep 17 00:00:00 2001
From: Derek Melchin <38889814+DerekMelchin@users.noreply.github.com>
Date: Wed, 4 Sep 2024 16:42:01 -0600
Subject: [PATCH] Create JIT Compilation.html

---
 .../13 Debugging Tools/JIT Compilation.html | 95 +++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 03 Writing Algorithms/01 Key Concepts/13 Debugging Tools/JIT Compilation.html

diff --git a/03 Writing Algorithms/01 Key Concepts/13 Debugging Tools/JIT Compilation.html b/03 Writing Algorithms/01 Key Concepts/13 Debugging Tools/JIT Compilation.html
new file mode 100644
index 0000000000..b94336781e
--- /dev/null
+++ b/03 Writing Algorithms/01 Key Concepts/13 Debugging Tools/JIT Compilation.html
@@ -0,0 +1,95 @@

+ To increase the speed of some methods, you can use decorators like @jit or @lru_cache.
+ The @jit decorator from Numba performs Just-In-Time (JIT) compilation.
+ It compiles Python code to machine code at runtime to increase the speed of loops and mathematical operations.
+ However, Numba can't compile all Python code. It performs best with numerical loops and NumPy arrays.
+ If you add the @jit decorator to your methods, the debugging process becomes more challenging because the compiled code no longer runs through the standard Python interpreter, so breakpoints and stack traces are less informative.
+ The following code snippet shows an example of using the @jit decorator:

+import numpy as np
+from numba import jit
+import time
+
+# Without JIT
+def slow_function(arr):
+    result = 0
+    for i in range(len(arr)):
+        result += np.sin(arr[i]) * np.cos(arr[i])
+    return result
+
+# With JIT
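+# nopython=True compiles the whole function to machine code and raises an error
+# if Numba can't, instead of silently falling back to the slower object mode.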
+@jit(nopython=True)
+def fast_function(arr):
+    result = 0
+    for i in range(len(arr)):
+        result += np.sin(arr[i]) * np.cos(arr[i])
+    return result
+
+# Example usage
+arr = np.random.rand(1000000) 
+
+# Test without JIT.
+start = time.time()
+slow_function(arr)
+print(f"Slow function took: {time.time() - start} seconds")
+
+# Test with JIT.
+start = time.time()
+fast_function(arr)  # The first call compiles the function, so this timing includes the compilation overhead.
+print(f"Fast function took: {time.time() - start} seconds")
+

Slow function took: 1.1557221412658691 seconds


Fast function took: 0.32973551750183105 seconds

+
+
+ +
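+ Because the first call to a method decorated with @jit triggers the compilation, the timing above includes the compilation overhead.
+ The following sketch, which assumes the fast_function and arr definitions from the preceding snippet, times a second call to measure the speed of the already-compiled code:

+# A minimal sketch: time fast_function again after the first call compiled it.
+start = time.time()
+fast_function(arr)  # Already compiled, so this call only measures execution time.
+print(f"Fast function (second call) took: {time.time() - start} seconds")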

+ The @lru_cache decorator from the functools module provides Least Recently Used (LRU) caching.
+ It caches the result of a function based on its arguments, so the function body doesn't have to execute again when you call the function multiple times with the same inputs.
+ The arguments must be hashable because they form the cache key.
+ The maxsize argument of the decorator defines the maximum number of results to cache. For example, @lru_cache(maxsize=512) means that it caches at most 512 results; once the cache is full, the least recently used results are discarded first.
+ If you add the @lru_cache decorator to your methods, make sure the return value depends only on the arguments (not on any global state), or else the cached results can become stale and incorrect.
+ The following code snippet shows an example of using the @lru_cache decorator:

from functools import lru_cache
+import time
+
+# Without caching
+def fibonacci(n):
+    if n < 2:
+        return n
+    return fibonacci(n-1) + fibonacci(n-2)
+
+# With caching
+@lru_cache(maxsize=512)
+def fibonacci_cached(n):
+    if n < 2:
+        return n
+    return fibonacci_cached(n-1) + fibonacci_cached(n-2)
+
+n = 30
+
+# Test without caching.
+start = time.time()
+for _ in range(n):
+    fibonacci(n)
+print(f"Fibonacci without cache took: {time.time() - start} seconds")
+
+# Test with caching.
+start = time.time()
+for _ in range(n):
+    fibonacci_cached(n)
+print(f"Fibonacci with cache took: {time.time() - start} seconds")
+

Fibonacci without cache took: 4.387492418289185 seconds


Fibonacci with cache took: 9.322166442871094e-05 seconds

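+ To verify that the cache is working, note that functions wrapped with @lru_cache expose the cache_info and cache_clear methods.
+ The following sketch assumes the fibonacci_cached function from the preceding snippet:

+# A minimal sketch: inspect and reset the cache of fibonacci_cached.
+print(fibonacci_cached.cache_info())  # Reports hits, misses, maxsize, and the current cache size.
+fibonacci_cached.cache_clear()        # Empties the cache so the next call recomputes the results.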