Elementwise

Elementwise ops operate on a per-element basis; they don't change the shape of the tensor.
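
For example, a unary op applied to a 2×3 tensor returns a 2×3 tensor. A minimal check (assuming the usual `from tinygrad import Tensor` import):

```python
from tinygrad import Tensor

t = Tensor([[-1., 0., 2.], [3., -4., 5.]])
out = t.relu()  # each element is mapped independently
assert out.shape == t.shape == (2, 3)
print(out.numpy())  # [[0. 0. 2.] [3. 0. 5.]]
```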

Unary Ops (math)¤

logical_not ¤

logical_not()

Computes the logical NOT of the tensor element-wise.

print(Tensor([False, True]).logical_not().numpy())
[ True False]
Source code in tinygrad/tensor.py
def logical_not(self):
  """
  Computes the logical NOT of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([False, True]).logical_not().numpy())
  ```
  """
  return F.Neq.apply(*self.cast(dtypes.bool)._broadcasted(True))

neg ¤

neg()

Negates the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
[ 3.  2.  1. -0. -1. -2. -3.]
Source code in tinygrad/tensor.py
def neg(self):
  """
  Negates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).neg().numpy())
  ```
  """
  return self*-1 if self.dtype != dtypes.bool else self.logical_not()

log ¤

log()

Computes the natural logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log().numpy())
[0.     0.6931 1.3863 2.0794]
Source code in tinygrad/tensor.py
def log(self):
  """
  Computes the natural logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log().numpy())
  ```
  """
  return F.Log.apply(self.cast(least_upper_float(self.dtype)))

log2 ¤

log2()

Computes the base-2 logarithm element-wise.

See: https://en.wikipedia.org/wiki/Logarithm

print(Tensor([1., 2., 4., 8.]).log2().numpy())
[0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def log2(self):
  """
  Computes the base-2 logarithm element-wise.

  See: https://en.wikipedia.org/wiki/Logarithm

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 4., 8.]).log2().numpy())
  ```
  """
  return self.log()/math.log(2)

exp ¤

exp()

Computes the exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp().numpy())
[ 1.      2.7183  7.3891 20.0855]
Source code in tinygrad/tensor.py
def exp(self):
  """
  Computes the exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp().numpy())
  ```
  """
  return F.Exp.apply(self.cast(least_upper_float(self.dtype)))

exp2 ¤

exp2()

Computes the base-2 exponential function element-wise.

See: https://en.wikipedia.org/wiki/Exponential_function

print(Tensor([0., 1., 2., 3.]).exp2().numpy())
[1. 2. 4. 8.]
Source code in tinygrad/tensor.py
def exp2(self):
  """
  Computes the base-2 exponential function element-wise.

  See: https://en.wikipedia.org/wiki/Exponential_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., 1., 2., 3.]).exp2().numpy())
  ```
  """
  return F.Exp.apply(self*math.log(2))

sqrt ¤

sqrt()

Computes the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
[1.     1.4142 1.7321 2.    ]
Source code in tinygrad/tensor.py
def sqrt(self):
  """
  Computes the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).sqrt().numpy())
  ```
  """
  return F.Sqrt.apply(self.cast(least_upper_float(self.dtype)))

rsqrt ¤

rsqrt()

Computes the reciprocal of the square root of the tensor element-wise.

print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
[1.     0.7071 0.5774 0.5   ]
Source code in tinygrad/tensor.py
def rsqrt(self):
  """
  Computes the reciprocal of the square root of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).rsqrt().numpy())
  ```
  """
  return self.reciprocal().sqrt()

sin ¤

sin()

Computes the sine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
[ 0.  1. -0. -1.  0.]
Source code in tinygrad/tensor.py
def sin(self):
  """
  Computes the sine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).sin().numpy())
  ```
  """
  return F.Sin.apply(self.cast(least_upper_float(self.dtype)))

cos ¤

cos()

Computes the cosine of the tensor element-wise.

print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
[ 1.0000e+00  0.0000e+00 -1.0000e+00 -2.3842e-07  1.0000e+00]
Source code in tinygrad/tensor.py
def cos(self):
  """
  Computes the cosine of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/2, math.pi, 3*math.pi/2, 2*math.pi]).cos().numpy())
  ```
  """
  return ((math.pi/2)-self).sin()

tan ¤

tan()

Computes the tangent of the tensor element-wise.

print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
[ 0.  1. inf -1.  0.]
Source code in tinygrad/tensor.py
def tan(self):
  """
  Computes the tangent of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([0., math.pi/4, math.pi/2, 3*math.pi/4, math.pi]).tan().numpy())
  ```
  """
  return self.sin() / self.cos()

asin ¤

asin()

Computes the inverse sine (arcsine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
[-1.1198 -0.6435 -0.3047  0.      0.3047  0.6435  1.1198]
Source code in tinygrad/tensor.py
def asin(self):
  """
  Computes the inverse sine (arcsine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).asin().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_81.htm 4.4.46
  coefficients = [-0.0012624911, 0.0066700901, -0.0170881256, 0.0308918810, -0.0501743046, 0.0889789874, -0.2145988016, 1.5707963050]
  x = math.pi / 2 - (1.0 - self.abs()).sqrt() * polyN(self.abs(), coefficients)
  return self.sign() * x

acos ¤

acos()

Computes the inverse cosine (arccosine) of the tensor element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
[2.6906 2.2143 1.8755 1.5708 1.2661 0.9273 0.451 ]
Source code in tinygrad/tensor.py
def acos(self):
  """
  Computes the inverse cosine (arccosine) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).acos().numpy())
  ```
  """
  return math.pi / 2 - self.asin()

atan ¤

atan()

Computes the inverse tangent (arctan) of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
[-1.249  -1.1071 -0.7854  0.      0.7854  1.1071  1.249 ]
Source code in tinygrad/tensor.py
def atan(self):
  """
  Computes the inverse tangent (arctan) of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).atan().numpy())
  ```
  """
  return (self / (1 + self * self).sqrt()).asin()

trunc ¤

trunc() -> Tensor

Truncates the tensor element-wise.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
[-3. -2. -1.  0.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def trunc(self: Tensor) -> Tensor:
  """
  Truncates the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).trunc().numpy())
  ```
  """
  return self.cast(dtypes.int32).cast(self.dtype)

ceil ¤

ceil() -> Tensor

Rounds the tensor element-wise towards positive infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
[-3. -2. -1.  0.  1.  2.  3.  4.]
Source code in tinygrad/tensor.py
def ceil(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards positive infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).ceil().numpy())
  ```
  """
  return (self > (b := self.trunc())).where(b+1, b)

floor ¤

floor() -> Tensor

Rounds the tensor element-wise towards negative infinity.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
[-4. -3. -2. -1.  0.  1.  2.  3.]
Source code in tinygrad/tensor.py
def floor(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise towards negative infinity.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).floor().numpy())
  ```
  """
  return (self < (b := self.trunc())).where(b-1, b)

round ¤

round() -> Tensor

Rounds the tensor element-wise with rounding half to even.

print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
[-4. -2. -2.  0.  0.  2.  2.  4.]
Source code in tinygrad/tensor.py
def round(self: Tensor) -> Tensor:
  """
  Rounds the tensor element-wise with rounding half to even.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]).round().numpy())
  ```
  """
  return ((self > 0) == ((b := self.cast(dtypes.int32) / 2.0).cast(dtypes.int32) == b)).where((self - 0.5).ceil(), (self + 0.5).floor())
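
Half-to-even ("banker's") rounding matches Python's built-in round, which gives a quick plain-Python sanity check of the behavior above:

```python
# Python's round() also rounds half to even, so it agrees with Tensor.round here.
vals = [-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]
print([round(v) for v in vals])  # [-4, -2, -2, 0, 0, 2, 2, 4]
```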

isinf ¤

isinf(
    detect_positive: bool = True,
    detect_negative: bool = True,
)

Checks the tensor element-wise, returning True where the element is infinity and False otherwise.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
[False  True False  True False]
Source code in tinygrad/tensor.py
def isinf(self:Tensor, detect_positive:bool=True, detect_negative:bool=True):
  """
  Checks the tensor element-wise, returning True where the element is infinity and False otherwise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isinf().numpy())
  ```
  """
  return (self == float("inf")) * detect_positive + (self == float("-inf")) * detect_negative
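
The detect_positive and detect_negative flags select which infinity is reported, as the expression above shows. A small sketch using the flags from the signature:

```python
from tinygrad import Tensor

t = Tensor([1, float('inf'), 2, float('-inf'), float('nan')])
print(t.isinf(detect_negative=False).numpy())  # +inf only: [False  True False False False]
print(t.isinf(detect_positive=False).numpy())  # -inf only: [False False False  True False]
```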

isnan ¤

isnan()

Checks the tensor element-wise, returning True where the element is NaN and False otherwise.

print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
[False False False False  True]
Source code in tinygrad/tensor.py
def isnan(self:Tensor):
  """
  Checks the tensor element-wise, returning True where the element is NaN and False otherwise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, float('inf'), 2, float('-inf'), float('nan')]).isnan().numpy())
  ```
  """
  return self != self

lerp ¤

lerp(end: Tensor, weight: Union[Tensor, float]) -> Tensor

Linearly interpolates between self and end by weight.

print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
[2.5 3.5 4.5]
Source code in tinygrad/tensor.py
def lerp(self, end: Tensor, weight: Union[Tensor, float]) -> Tensor:
  """
  Linearly interpolates between `self` and `end` by `weight`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3.]).lerp(Tensor([4., 5., 6.]), 0.5).numpy())
  ```
  """
  if self.dtype == dtypes.uint8 and isinstance(weight, Tensor):
    w_i = (weight * (1<<(W_PREC:=7)) + 0.5).cast(dtypes.int16)
    return (self+(((end - self).cast(dtypes.int8) * w_i + (1<<W_PREC-1)).cast(dtypes.uint16) >> W_PREC)).cast(dtypes.uint8)
  return self + (end - self) * weight
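
The uint8 branch avoids a float round-trip by quantizing weight to 7 fractional bits of fixed point. A plain-Python sketch of that arithmetic for a single element (values are hypothetical, mirroring the W_PREC=7 path above):

```python
W_PREC = 7
a, b, weight = 10, 30, 0.25                  # self, end, weight (hypothetical)
w_i = int(weight * (1 << W_PREC) + 0.5)      # quantized weight -> 32
out = a + (((b - a) * w_i + (1 << (W_PREC - 1))) >> W_PREC)  # rounded fixed-point blend
print(out)  # 15, matching 10 + 0.25 * (30 - 10)
```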

square ¤

square()

Squares the tensor element-wise. Equivalent to self*self.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
[9. 4. 1. 0. 1. 4. 9.]
Source code in tinygrad/tensor.py
def square(self):
  """
  Squares the tensor element-wise.
  Equivalent to `self*self`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).square().numpy())
  ```
  """
  return self*self

clamp ¤

clamp(min_=None, max_=None)

Clips (clamps) the values in the tensor between min_ and max_ element-wise. If min_ is None, there is no lower bound. If max_ is None, there is no upper bound.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def clamp(self, min_=None, max_=None):
  """
  Clips (clamps) the values in the tensor between `min_` and `max_` element-wise.
  If `min_` is `None`, there is no lower bound. If `max_` is None, there is no upper bound.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).clip(-1, 1).numpy())
  ```
  """
  if min_ is None and max_ is None: raise RuntimeError("at least one of 'min_' or 'max_' must not be None")
  ret = self.maximum(min_) if min_ is not None else self
  return ret.minimum(max_) if max_ is not None else ret
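
Since each bound is optional, one-sided clamping also works; a sketch:

```python
from tinygrad import Tensor

t = Tensor([-3., -2., -1., 0., 1., 2., 3.])
print(t.clamp(min_=0).numpy())  # lower bound only: [0. 0. 0. 0. 1. 2. 3.]
print(t.clamp(max_=0).numpy())  # upper bound only: [-3. -2. -1. 0. 0. 0. 0.]
```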

clip ¤

clip(min_=None, max_=None)

Alias for Tensor.clamp.

Source code in tinygrad/tensor.py
def clip(self, min_=None, max_=None):
  """
  Alias for `Tensor.clamp`.
  """
  return self.clamp(min_, max_)

sign ¤

sign()

Returns the sign of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
[-1. -1. -1.  0.  1.  1.  1.]
Source code in tinygrad/tensor.py
def sign(self):
  """
  Returns the sign of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sign().numpy())
  ```
  """
  return F.Sign.apply(self)

abs ¤

abs()

Computes the absolute value of the tensor element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
[3. 2. 1. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def abs(self):
  """
  Computes the absolute value of the tensor element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).abs().numpy())
  ```
  """
  return self * self.sign()

reciprocal ¤

reciprocal()

Computes 1/x element-wise.

print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
[1.     0.5    0.3333 0.25  ]
Source code in tinygrad/tensor.py
def reciprocal(self):
  """
  Computes `1/x` element-wise.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1., 2., 3., 4.]).reciprocal().numpy())
  ```
  """
  return F.Reciprocal.apply(self.cast(least_upper_float(self.dtype)))

Unary Ops (activation)¤

relu ¤

relu()

Applies the Rectified Linear Unit (ReLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
[0. 0. 0. 0. 1. 2. 3.]
Source code in tinygrad/tensor.py
def relu(self):
  """
  Applies the Rectified Linear Unit (ReLU) function element-wise.

  - Described: https://paperswithcode.com/method/relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).relu().numpy())
  ```
  """
  return F.Relu.apply(self)

sigmoid ¤

sigmoid()

Applies the Sigmoid function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
[0.0474 0.1192 0.2689 0.5    0.7311 0.8808 0.9526]
Source code in tinygrad/tensor.py
def sigmoid(self):
  """
  Applies the Sigmoid function element-wise.

  - Described: https://en.wikipedia.org/wiki/Sigmoid_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sigmoid().numpy())
  ```
  """
  return (1 + (self * (-1/math.log(2))).exp2()).reciprocal()
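
The exp2 form relies on the identity exp(-x) = 2^(-x / ln 2); a plain-Python check of that identity:

```python
import math

x = 1.5
lhs = 1 / (1 + math.exp(-x))                    # textbook sigmoid
rhs = 1 / (1 + 2 ** (x * (-1 / math.log(2))))   # exp2 form used above
assert math.isclose(lhs, rhs)
print(lhs)  # ~0.8176
```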

hardsigmoid ¤

hardsigmoid(alpha: float = 1 / 6, beta: float = 0.5)

Applies the Hardsigmoid function element-wise. NOTE: the default alpha and beta values are taken from torch.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
[0.     0.1667 0.3333 0.5    0.6667 0.8333 1.    ]
Source code in tinygrad/tensor.py
def hardsigmoid(self, alpha:float=1/6, beta:float=0.5):
  """
  Applies the Hardsigmoid function element-wise.
  NOTE: the default `alpha` and `beta` values are taken from torch

  - Described: https://paperswithcode.com/method/hard-sigmoid
  - See: https://pytorch.org/docs/stable/generated/torch.nn.functional.hardsigmoid.html

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardsigmoid().numpy())
  ```
  """
  return (alpha * self + beta).relu() - (alpha * self + beta - 1).relu()

elu ¤

elu(alpha=1.0)

Applies the Exponential Linear Unit (ELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def elu(self, alpha=1.0):
  """
  Applies the Exponential Linear Unit (ELU) function element-wise.

  - Described: https://paperswithcode.com/method/elu
  - Paper: https://arxiv.org/abs/1511.07289v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).elu().numpy())
  ```
  """
  return self.relu() - alpha*(1-self.exp()).relu()

celu ¤

celu(alpha=1.0)

Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
[-0.9502 -0.8647 -0.6321  0.      1.      2.      3.    ]
Source code in tinygrad/tensor.py
def celu(self, alpha=1.0):
  """
  Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.

  - Described: https://paperswithcode.com/method/celu
  - Paper: https://arxiv.org/abs/1704.07483

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).celu().numpy())
  ```
  """
  return self.maximum(0) + (alpha * ((self / alpha).exp() - 1)).minimum(0)

selu ¤

selu(alpha=1.67326, gamma=1.0507)

Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
[-1.6706 -1.5202 -1.1113  0.      1.0507  2.1014  3.1521]
Source code in tinygrad/tensor.py
def selu(self, alpha=1.67326, gamma=1.0507):
  """
  Applies the Scaled Exponential Linear Unit (SELU) function element-wise.

  - Described: https://paperswithcode.com/method/selu
  - Paper: https://arxiv.org/abs/1706.02515v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).selu().numpy())
  ```
  """
  return gamma * (self >= 0).detach().where(self, alpha * (self.exp() - 1))

swish ¤

swish()

See .silu()

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def swish(self):
  """
  See `.silu()`

  - Paper: https://arxiv.org/abs/1710.05941v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).swish().numpy())
  ```
  """
  return self * self.sigmoid()

silu ¤

silu()

Applies the Sigmoid Linear Unit (SiLU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
[-0.1423 -0.2384 -0.2689  0.      0.7311  1.7616  2.8577]
Source code in tinygrad/tensor.py
def silu(self):
  """
  Applies the Sigmoid Linear Unit (SiLU) function element-wise.

  - Described: https://paperswithcode.com/method/silu
  - Paper: https://arxiv.org/abs/1606.08415

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).silu().numpy())
  ```
  """
  return self.swish()   # The SiLU function is also known as the swish function.

relu6 ¤

relu6()

Applies the ReLU6 function element-wise.

print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
[0. 0. 0. 0. 3. 6. 6.]
Source code in tinygrad/tensor.py
def relu6(self):
  """
  Applies the ReLU6 function element-wise.

  - Described: https://paperswithcode.com/method/relu6
  - Paper: https://arxiv.org/abs/1704.04861v1

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-9., -6., -3., 0., 3., 6., 9.]).relu6().numpy())
  ```
  """
  return self.relu() - (self-6).relu()

hardswish ¤

hardswish()

Applies the Hardswish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
[-0.     -0.3333 -0.3333  0.      0.6667  1.6667  3.    ]
Source code in tinygrad/tensor.py
def hardswish(self):
  """
  Applies the Hardswish function element-wise.

  - Described: https://paperswithcode.com/method/hard-swish
  - Paper: https://arxiv.org/abs/1905.02244v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).hardswish().numpy())
  ```
  """
  return self * (self+3).relu6() * (1/6)

tanh ¤

tanh()

Applies the Hyperbolic Tangent (tanh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
[-0.9951 -0.964  -0.7616  0.      0.7616  0.964   0.9951]
Source code in tinygrad/tensor.py
def tanh(self):
  """
  Applies the Hyperbolic Tangent (tanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Tanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).tanh().numpy())
  ```
  """
  return 2.0 * ((2.0 * self).sigmoid()) - 1.0
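
The implementation uses the identity tanh(x) = 2*sigmoid(2x) - 1; a plain-Python check:

```python
import math

x = 0.7
assert math.isclose(math.tanh(x), 2 / (1 + math.exp(-2 * x)) - 1)
```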

sinh ¤

sinh()

Applies the Hyperbolic Sine (sinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
[-10.0179  -3.6269  -1.1752   0.       1.1752   3.6269  10.0179]
Source code in tinygrad/tensor.py
def sinh(self):
  """
  Applies the Hyperbolic Sine (sinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Sinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).sinh().numpy())
  ```
  """
  return (self.exp() - self.neg().exp()) / 2

cosh ¤

cosh()

Applies the Hyperbolic Cosine (cosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
[10.0677  3.7622  1.5431  1.      1.5431  3.7622 10.0677]
Source code in tinygrad/tensor.py
def cosh(self):
  """
  Applies the Hyperbolic Cosine (cosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Hyperbolic_functions#Cosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).cosh().numpy())
  ```
  """
  return (self.exp() + self.neg().exp()) / 2

atanh ¤

atanh()

Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
[-1.4722 -0.6931 -0.3095  0.      0.3095  0.6931  1.4722]
Source code in tinygrad/tensor.py
def atanh(self):
  """
  Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#atanh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-0.9, -0.6, -0.3, 0., 0.3, 0.6, 0.9]).atanh().numpy())
  ```
  """
  return ((1 + self)/(1 - self)).log() / 2

asinh ¤

asinh()

Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
[-1.8184 -1.4436 -0.8814  0.      0.8814  1.4436  1.8184]
Source code in tinygrad/tensor.py
def asinh(self):
  """
  Applies the Inverse Hyperbolic Sine (asinh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#asinh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).asinh().numpy())
  ```
  """
  return (self + (self.square() + 1).sqrt()).log()

acosh ¤

acosh()

Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
[   nan    nan    nan    nan 0.     1.317  1.7627]
Source code in tinygrad/tensor.py
def acosh(self):
  """
  Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.

  - Described: https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#acosh

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).acosh().numpy())
  ```
  """
  return (self + (self.square() - 1).sqrt()).log()

hardtanh ¤

hardtanh(min_val=-1, max_val=1)

Applies the Hardtanh function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
[-1.  -1.  -0.5  0.   0.5  1.   1. ]
Source code in tinygrad/tensor.py
def hardtanh(self, min_val=-1, max_val=1):
  """
  Applies the Hardtanh function element-wise.

  - Described: https://paperswithcode.com/method/hardtanh-activation

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).hardtanh().numpy())
  ```
  """
  return self.clip(min_val, max_val)
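
Since hardtanh is just a clip to [min_val, max_val], custom bounds follow directly; a sketch:

```python
from tinygrad import Tensor

t = Tensor([-3., -2., -1., 0., 1., 2., 3.])
print(t.hardtanh(min_val=-2, max_val=2).numpy())  # [-2. -2. -1. 0. 1. 2. 2.]
```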

erf ¤

erf()

Applies the error function element-wise.

print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
[-0.9661 -0.8427 -0.5205  0.      0.5205  0.8427  0.9661]
Source code in tinygrad/tensor.py
def erf(self):
  """
  Applies the error function element-wise.

  - Described: https://en.wikipedia.org/wiki/Error_function

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1.5, -1.0, -0.5, 0., 0.5, 1.0, 1.5]).erf().numpy())
  ```
  """
  # https://personal.math.ubc.ca/~cbm/aands/page_299.htm 7.1.26
  t = 1.0 / (1.0 + 0.3275911 * self.abs())
  return self.sign() * (1.0 - t * polyN(t, [1.061405429, -1.453152027, 1.421413741, -0.284496736, 0.254829592]) * (-self.square()).exp())
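
The Abramowitz & Stegun 7.1.26 approximation used above is accurate to about 1.5e-7. A plain-Python version of the same formula, checked against math.erf:

```python
import math

def erf_approx(x):
  # A&S 7.1.26 with the same constants as the code above; copysign handles odd symmetry
  t = 1.0 / (1.0 + 0.3275911 * abs(x))
  poly = t * (0.254829592 + t * (-0.284496736 + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))))
  return math.copysign(1.0 - poly * math.exp(-x * x), x)

print(erf_approx(0.5), math.erf(0.5))  # agree to ~1e-7
```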

gelu ¤

gelu()

Applies the Gaussian Error Linear Unit (GELU) function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
[-0.0036 -0.0454 -0.1588  0.      0.8412  1.9546  2.9964]
Source code in tinygrad/tensor.py
def gelu(self):
  """
  Applies the Gaussian Error Linear Unit (GELU) function element-wise.

  - Described: https://paperswithcode.com/method/gelu
  - Paper: https://arxiv.org/abs/1606.08415v5

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).gelu().numpy())
  ```
  """
  return 0.5 * self * (1 + (math.sqrt(2 / math.pi) * (self + 0.044715 * self ** 3)).tanh())

quick_gelu ¤

quick_gelu()

Applies the Sigmoid GELU approximation element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
[-0.0181 -0.0643 -0.1542  0.      0.8458  1.9357  2.9819]
Source code in tinygrad/tensor.py
def quick_gelu(self):
  """
  Applies the Sigmoid GELU approximation element-wise.

  - Described: https://paperswithcode.com/method/gelu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).quick_gelu().numpy())
  ```
  """
  return self * (self * 1.702).sigmoid()

leakyrelu ¤

leakyrelu(neg_slope=0.01)

Applies the Leaky ReLU function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu().numpy())
[-0.03 -0.02 -0.01  0.    1.    2.    3.  ]
print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu(neg_slope=0.42).numpy())
[-1.26 -0.84 -0.42  0.    1.    2.    3.  ]

Source code in tinygrad/tensor.py
def leakyrelu(self, neg_slope=0.01):
  """
  Applies the Leaky ReLU function element-wise.

  - Described: https://paperswithcode.com/method/leaky-relu

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu().numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).leakyrelu(neg_slope=0.42).numpy())
  ```
  """
  return self.relu() - (-neg_slope*self).relu()

mish ¤

mish()

Applies the Mish function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
[-0.1456 -0.2525 -0.3034  0.      0.8651  1.944   2.9865]
Source code in tinygrad/tensor.py
def mish(self):
  """
  Applies the Mish function element-wise.

  - Described: https://paperswithcode.com/method/mish
  - Paper: https://arxiv.org/abs/1908.08681v3

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).mish().numpy())
  ```
  """
  return self * self.softplus().tanh()

softplus ¤

softplus(beta=1)

Applies the Softplus function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
[0.0486 0.1269 0.3133 0.6931 1.3133 2.1269 3.0486]
Source code in tinygrad/tensor.py
def softplus(self, beta=1):
  """
  Applies the Softplus function element-wise.

  - Described: https://paperswithcode.com/method/softplus

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softplus().numpy())
  ```
  """
  return (1/beta) * (1 + (self*beta).exp()).log()
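
From the formula above, beta scales the input before the log-sum and the output after it, so larger beta gives a sharper corner at 0 while softplus(0) becomes ln(2)/beta; a sketch:

```python
from tinygrad import Tensor

t = Tensor([-3., -2., -1., 0., 1., 2., 3.])
print(t.softplus(beta=2).numpy())  # sharper transition; value at 0 is ln(2)/2 ~= 0.3466
```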

softsign ¤

softsign()

Applies the Softsign function element-wise.

print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
[-0.75   -0.6667 -0.5     0.      0.5     0.6667  0.75  ]
Source code in tinygrad/tensor.py
def softsign(self):
  """
  Applies the Softsign function element-wise.

  - Described: https://paperswithcode.com/method/softsign

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-3., -2., -1., 0., 1., 2., 3.]).softsign().numpy())
  ```
  """
  return self / (1 + self.abs())

Elementwise Ops (broadcasted)¤

add ¤

add(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Adds self and x. Equivalent to self + x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.add(20).numpy())
[19.4856 21.085  20.9089 19.9159]
print(t.add(Tensor([[2.0], [3.5]])).numpy())
[[1.4856 3.085  2.9089 1.9159]
 [2.9856 4.585  4.4089 3.4159]]

Source code in tinygrad/tensor.py
def add(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Adds `self` and `x`.
  Equivalent to `self + x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.add(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  return F.Add.apply(*self._broadcasted(x, reverse))

sub ¤

sub(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Subtracts x from self. Equivalent to self - x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.sub(20).numpy())
[-20.5144 -18.915  -19.0911 -20.0841]
print(t.sub(Tensor([[2.0], [3.5]])).numpy())
[[-2.5144 -0.915  -1.0911 -2.0841]
 [-4.0144 -2.415  -2.5911 -3.5841]]

Source code in tinygrad/tensor.py
def sub(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Subtracts `x` from `self`.
  Equivalent to `self - x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(20).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.sub(Tensor([[2.0], [3.5]])).numpy())
  ```
  """
  a, b = self._broadcasted(x, reverse)
  return a + (-b)

mul ¤

mul(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Multiplies self and x. Equivalent to self * x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.mul(3).numpy())
[-1.5431  3.2549  2.7267 -0.2523]
print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
[[ 0.5144 -1.085  -0.9089  0.0841]
 [-1.0287  2.17    1.8178 -0.1682]]

Source code in tinygrad/tensor.py
def mul(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Multiplies `self` and `x`.
  Equivalent to `self * x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.mul(Tensor([[-1.0], [2.0]])).numpy())
  ```
  """
  return F.Mul.apply(*self._broadcasted(x, reverse))

div ¤

div(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Divides self by x. Equivalent to self / x. Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs. div performs true division.

Tensor.manual_seed(42)
t = Tensor.randn(4)
print(t.numpy())
[-0.5144  1.085   0.9089 -0.0841]
print(t.div(3).numpy())
[-0.1715  0.3617  0.303  -0.028 ]
print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
[0.5    1.3333 2.5   ]

Source code in tinygrad/tensor.py
def div(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Divides `self` by `x`.
  Equivalent to `self / x`.
  Supports broadcasting to a common shape, type promotion, and integer, float, boolean inputs.
  `div` performs true division.

  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  t = Tensor.randn(4)
  print(t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(t.div(3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 4, 10]).div(Tensor([2, 3, 4])).numpy())
  ```
  """
  numerator, denominator = self._broadcasted(x, reverse)
  return numerator.cast(least_upper_float(numerator.dtype)) * denominator.cast(least_upper_float(denominator.dtype)).reciprocal()

xor ¤

xor(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Computes bitwise xor of self and x. Equivalent to self ^ x. Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

print(Tensor([-1, -2, 3]).xor(Tensor([1, 0, 3])).numpy())
[-2 -2  0]
print(Tensor([True, True, False, False]).xor(Tensor([True, False, True, False])).numpy())
[False  True  True False]

Source code in tinygrad/tensor.py
def xor(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Computes bitwise xor of `self` and `x`.
  Equivalent to `self ^ x`.
  Supports broadcasting to a common shape, type promotion, and integer, boolean inputs.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, -2, 3]).xor(Tensor([1, 0, 3])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([True, True, False, False]).xor(Tensor([True, False, True, False])).numpy())
  ```
  """
  if self.dtype != dtypes.bool and not dtypes.is_int(self.dtype): raise RuntimeError(f"{self.dtype} is not supported")
  return F.Xor.apply(*self._broadcasted(x, reverse))

lshift ¤

lshift(x: int)

Computes left arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self << x.

print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
[  4  12 124]
Source code in tinygrad/tensor.py
def lshift(self, x:int):
  """
  Computes left arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self << x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([1, 3, 31], dtype=dtypes.uint8).lshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0, f"not supported {self.dtype=} {x=}"
  return self.mul(2 ** x)

rshift ¤

rshift(x: int)

Computes right arithmetic shift of self by x bits. self must have unsigned dtype. Equivalent to self >> x.

print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
[ 1  3 31]
Source code in tinygrad/tensor.py
def rshift(self, x:int):
  """
  Computes right arithmetic shift of `self` by `x` bits. `self` must have unsigned dtype.
  Equivalent to `self >> x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([4, 13, 125], dtype=dtypes.uint8).rshift(2).numpy())
  ```
  """
  assert dtypes.is_unsigned(self.dtype) and isinstance(x, int) and x >= 0, f"not supported {self.dtype=} {x=}"
  return self.idiv(2 ** x)

pow ¤

pow(x: Union[Tensor, ConstType], reverse=False) -> Tensor

Computes power of self with x. Equivalent to self ** x.

print(Tensor([-1, 2, 3]).pow(2).numpy())
[1 4 9]
print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
[-2147483648           1           5]
print((2 ** Tensor([-1, 2, 3])).numpy())
[0.5 4.  8. ]

Source code in tinygrad/tensor.py
def pow(self, x:Union[Tensor, ConstType], reverse=False) -> Tensor:
  """
  Computes power of `self` with `x`.
  Equivalent to `self ** x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(2).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).pow(Tensor([-1.5, 0.5, 1.5])).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((2 ** Tensor([-1, 2, 3])).numpy())
  ```
  """
  x = self._to_const_val(x)
  if not isinstance(x, Tensor) and not reverse:
    # simple pow identities
    if x < 0: return self.reciprocal().pow(-x).cast(self.dtype)
    if x == 0: return 1 + self * 0
    # rewrite pow 0.5 to sqrt
    if int(x - 0.5) + 0.5 == x: return self.pow(int(x - 0.5)) * self.sqrt()
    if int(x) == x: return self.pow(x // 2).square() * (1 if x % 2 == 0 else self)

  # positive const ** self
  if not isinstance(x, Tensor) and reverse and x > 0: return self.mul(math.log(x)).exp()

  base, exponent = self._broadcasted(x, reverse=reverse)
  # start with b ** e = exp(e * log(b))
  ret = base.abs().log().mul(exponent).exp()
  # correct sign of negative base with odd exponent (cos has a period of 2pi so we use it here to get the oddness of the exponent)
  negative_base = (base < 0).detach().where(1, 0)
  # 1 for non-negative base or negative even exponent, -1 for negative odd exponent, don't care about non-integer exponent
  correct_sign = 1 + negative_base * ((exponent * math.pi).cos() - 1)
  # inject nan for negative base and non-integer exponent
  inject_nan = (negative_base * (exponent != exponent.trunc())).detach().where(math.nan, 1)
  # apply correct_sign inject_nan, and fix 0 ** 0 = 1
  ret = ((base == 0) * (exponent == 0)).detach().where(1, ret * correct_sign * inject_nan)
  return ret.round().cast(self.dtype) if not dtypes.is_float(self.dtype) else ret
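
The cos(pi * exponent) trick encodes exponent parity: for a negative base, correct_sign is 1 when the exponent is even and -1 when it is odd. A plain-Python check of that term:

```python
import math

# correct_sign = 1 + negative_base * (cos(pi * exponent) - 1), with negative_base == 1
for e in (2, 3, 4, 5):
  print(e, 1 + (math.cos(math.pi * e) - 1))  # even -> 1.0, odd -> -1.0
```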

maximum ¤

maximum(x: Union[Tensor, ConstType]) -> Tensor

Computes element-wise maximum of self and x.

print(Tensor([-1, 2, 3]).maximum(1).numpy())
[1 2 3]
print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
[-1  2  9]

Source code in tinygrad/tensor.py
def maximum(self, x:Union[Tensor, ConstType]) -> Tensor:
  """
  Computes element-wise maximum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).maximum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  # NOTE: the mid-point is for backward, revisit after new gradient API
  if self.is_floating_point(): return (self<x).detach().where(x, (self==x).detach().where(((self * 0.5 + x * 0.5).cast(self.dtype)), self))
  return (self<x).detach().where(x, self)

minimum ¤

minimum(x: Union[Tensor, ConstType]) -> Tensor

Computes element-wise minimum of self and x.

print(Tensor([-1, 2, 3]).minimum(1).numpy())
[-1  1  1]
print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
[-4 -2  3]

Source code in tinygrad/tensor.py
def minimum(self, x:Union[Tensor, ConstType]) -> Tensor:
  """
  Computes element-wise minimum of `self` and `x`.

  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(1).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print(Tensor([-1, 2, 3]).minimum(Tensor([-4, -2, 9])).numpy())
  ```
  """
  t, x = self._broadcasted(x)
  return t._inverse().maximum(x._inverse())._inverse()

where ¤

where(
    x: Union[Tensor, ConstType, sint],
    y: Union[Tensor, ConstType, sint],
)

Return a tensor of elements selected from either x or y, depending on self. output_i = x_i if self_i else y_i.

cond = Tensor([[True, True, False], [True, False, False]])
print(cond.where(1, 3).numpy())
[[1 1 3]
 [1 3 3]]
Tensor.manual_seed(42)
cond = Tensor.randn(2, 3)
print(cond.numpy())
[[ 0.9779  0.4678  0.5526]
 [-0.3288 -0.8555  0.2753]]
print((cond > 0).where(cond, -float("inf")).numpy())
[[0.9779 0.4678 0.5526]
 [  -inf   -inf 0.2753]]

Source code in tinygrad/tensor.py
def where(self:Tensor, x:Union[Tensor, ConstType, sint], y:Union[Tensor, ConstType, sint]):
  """
  Return a tensor of elements selected from either `x` or `y`, depending on `self`.
  `output_i = x_i if self_i else y_i`.

  ```python exec="true" source="above" session="tensor" result="python"
  cond = Tensor([[True, True, False], [True, False, False]])
  print(cond.where(1, 3).numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  Tensor.manual_seed(42)
  cond = Tensor.randn(2, 3)
  print(cond.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  print((cond > 0).where(cond, -float("inf")).numpy())
  ```
  """
  if isinstance(x, Tensor): x, y = x._broadcasted(y)
  elif isinstance(y, Tensor): y, x = y._broadcasted(x)
  cond, x = self._broadcasted(x, match_dtype=False)
  cond, y = cond._broadcasted(y, match_dtype=False)
  return F.Where.apply(cond.cast(dtypes.bool), *x._broadcasted(y))

Casting Ops¤

cast ¤

cast(dtype: DTypeLike) -> Tensor

Casts self to the given dtype.

t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
print(t.dtype, t.numpy())
dtypes.float [-1.   2.5  3. ]
t = t.cast(dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.cast(dtypes.uint8)
print(t.dtype, t.numpy())
dtypes.uchar [255   2   3]

Source code in tinygrad/tensor.py
def cast(self, dtype:DTypeLike) -> Tensor:
  """
  Casts `self` to the given `dtype`.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2.5, 3], dtype=dtypes.float)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.cast(dtypes.uint8)
  print(t.dtype, t.numpy())
  ```
  """
  if (dt:=to_dtype(dtype)) in {dtypes.uint8, dtypes.uint16} and dtypes.is_float(self.dtype):
    # NOTE: values within the int32 range and outside the unsigned dtype range will cause values to wrap around
    return F.Cast.apply(F.Cast.apply(self, dtype=dtypes.int32), dtype=dt)
  return self if self.dtype == dt else F.Cast.apply(self, dtype=dt)
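
Per the NOTE in the code, the int32 round-trip in the uint8/uint16 path means out-of-range floats wrap rather than saturate; a small sketch:

```python
from tinygrad import Tensor, dtypes

t = Tensor([300.0, -1.0])
print(t.cast(dtypes.uint8).numpy())  # wraps via int32: [ 44 255]
```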

bitcast ¤

bitcast(dtype: DTypeLike) -> Tensor

Bitcasts self to the given dtype of the same itemsize.

self must not require a gradient.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.bitcast(dtypes.uint32)
print(t.dtype, t.numpy())
dtypes.uint [4294967295          2          3]

Source code in tinygrad/tensor.py
def bitcast(self, dtype:DTypeLike) -> Tensor:
  """
  Bitcasts `self` to the given `dtype` of the same itemsize.

  `self` must not require a gradient.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bitcast(dtypes.uint32)
  print(t.dtype, t.numpy())
  ```
  """
  if self.requires_grad: raise RuntimeError("can't backprop through bitcast")
  dt = to_dtype(dtype)
  if (not isinstance(self.device, str) or not self.device.startswith("DISK")) and (ns:=dt.itemsize) != (os:=self.dtype.itemsize):
    if (self.shape[-1]*os) % ns != 0: raise RuntimeError("unsupported size in bitcast")
    new_uint, old_uint = to_dtype(f"uint{8*ns}"), to_dtype(f"uint{8*os}")
    tmp = self.bitcast(old_uint)
    if ns > os: return functools.reduce(Tensor.add, (tmp[..., i::ns//os].cast(new_uint) << 8*i*os for i in range(ns//os))).bitcast(dtype)
    return Tensor.stack(*(tmp>>8*i*ns for i in range(os//ns)), dim=-1).flatten(-2).cast(new_uint).bitcast(dtype)
  return F.Cast.apply(self, dtype=dt, bitcast=True) if self.dtype != dt else self
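
Because a bitcast reinterprets the raw bits, it is handy for inspecting float representations (same itemsize here); a sketch:

```python
from tinygrad import Tensor, dtypes

t = Tensor([1.0], dtype=dtypes.float32)
print(t.bitcast(dtypes.uint32).numpy())  # [1065353216] == 0x3f800000, the IEEE-754 bits of 1.0
```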

float ¤

float() -> Tensor

Convenience method to cast self to a float32 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.float()
print(t.dtype, t.numpy())
dtypes.float [-1.  2.  3.]

Source code in tinygrad/tensor.py
def float(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.float()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float32)

half ¤

half() -> Tensor

Convenience method to cast self to a float16 Tensor.

t = Tensor([-1, 2, 3], dtype=dtypes.int32)
print(t.dtype, t.numpy())
dtypes.int [-1  2  3]
t = t.half()
print(t.dtype, t.numpy())
dtypes.half [-1.  2.  3.]

Source code in tinygrad/tensor.py
def half(self) -> Tensor:
  """
  Convenience method to cast `self` to a `float16` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 2, 3], dtype=dtypes.int32)
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.half()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.float16)

int ¤

int() -> Tensor

Convenience method to cast self to an int32 Tensor.

t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
print(t.dtype, t.numpy())
dtypes.float [-1.5 -0.5  0.   0.5  1.5]
t = t.int()
print(t.dtype, t.numpy())
dtypes.int [-1  0  0  0  1]

Source code in tinygrad/tensor.py
def int(self) -> Tensor:
  """
  Convenience method to cast `self` to an `int32` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.int()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.int32)

bool ¤

bool() -> Tensor

Convenience method to cast self to a bool Tensor.

t = Tensor([-1, 0, 1])
print(t.dtype, t.numpy())
dtypes.int [-1  0  1]
t = t.bool()
print(t.dtype, t.numpy())
dtypes.bool [ True False  True]

Source code in tinygrad/tensor.py
def bool(self) -> Tensor:
  """
  Convenience method to cast `self` to a `bool` Tensor.

  ```python exec="true" source="above" session="tensor" result="python"
  t = Tensor([-1, 0, 1])
  print(t.dtype, t.numpy())
  ```
  ```python exec="true" source="above" session="tensor" result="python"
  t = t.bool()
  print(t.dtype, t.numpy())
  ```
  """
  return self.cast(dtypes.bool)