[{"data":1,"prerenderedAt":1059},["ShallowReactive",2],{"i-kinnu:logo":3,"i-kinnu:origami-folding":8,"tile-technology-artificial-intelligence-generative-ai":12,"i-lucide:chevron-right":1054,"i-lucide:menu":1057},{"left":4,"top":4,"width":5,"height":5,"rotate":4,"vFlip":6,"hFlip":6,"body":7},0,27,false,"\u003Cg fill=\"none\">\u003Cpath d=\"M0.046875 1.05555C0.046875 1.03541 0.048197 1.01579 0.0507438 0.996728C0.0987149 0.438619 0.586845 0 1.18194 0H25.4398C26.451 0 26.9575 1.171 26.2424 1.85585L15.7301 11.9243L1.31574 0.903476C1.17475 0.79568 1.01137 0.761884 0.859586 0.784111L26.2936 25.1441C27.0086 25.829 26.5022 27 25.4909 27H1.18194C0.555061 27 0.046875 26.5133 0.046875 25.9129V1.05555Z\" fill=\"currentColor\"/>\u003C/g>",{"left":4,"top":4,"width":9,"height":10,"rotate":4,"vFlip":6,"hFlip":6,"body":11},1000,236,"\u003Cg fill=\"none\">\u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M193.68 38.2238C195.994 38.2238 197.87 40.0989 197.87 42.412V231.812C197.87 234.125 195.994 236 193.68 236H4.19013C1.87603 236 2.02305e-07 234.125 0 231.812V42.412C-2.02305e-07 40.0989 1.87603 38.2238 4.19013 38.2238H193.68ZM111.76 89.0072C111.685 87.9474 110.572 87.2905 109.608 87.7376L96.8872 93.641C95.7786 94.1554 95.702 95.7016 96.7545 96.3225L101.579 99.167C94.7045 109.365 90.5733 122.892 90.5732 137.642C90.5733 154.323 95.8569 169.439 104.416 179.945C105.301 181.032 106.9 181.196 107.987 180.311C109.075 179.426 109.238 177.828 108.353 176.741C100.621 167.25 95.6522 153.305 95.6521 137.642C95.6522 123.661 99.6138 111.051 105.963 101.754L110.456 104.403C111.508 105.024 112.826 104.21 112.74 102.991L111.76 89.0072ZM9.63194 136.286C9.14864 136.286 8.75684 136.678 8.75684 137.161C8.7569 137.644 9.14868 138.035 9.63194 138.035H17.2161C17.6993 138.035 18.0912 137.644 18.0912 137.161C18.0912 136.678 17.6994 136.286 17.2161 136.286H9.63194ZM22.6813 136.286C22.198 136.286 21.8062 136.678 21.8062 137.161C21.8063 137.644 22.1981 138.035 22.6813 138.035H30.2655C30.7487 138.035 
31.1406 137.644 31.1406 137.161C31.1406 136.678 30.7488 136.286 30.2655 136.286H22.6813ZM35.7464 136.286C35.2631 136.286 34.8713 136.678 34.8713 137.161C34.8713 137.644 35.2631 138.035 35.7464 138.035H44.4973C44.9805 138.035 45.3724 137.644 45.3724 137.161C45.3724 136.678 44.9806 136.286 44.4973 136.286H35.7464ZM49.9977 136.286C49.5144 136.286 49.1226 136.678 49.1226 137.161C49.1226 137.644 49.5144 138.035 49.9977 138.035H57.5819C58.0651 138.035 58.4569 137.644 58.457 137.161C58.457 136.678 58.0651 136.286 57.5819 136.286H49.9977ZM63.0783 136.286C62.595 136.286 62.2032 136.678 62.2032 137.161C62.2033 137.644 62.5951 138.035 63.0783 138.035H70.6625C71.1457 138.035 71.5375 137.644 71.5376 137.161C71.5376 136.678 71.1457 136.286 70.6625 136.286H63.0783ZM76.1277 136.286C75.6444 136.286 75.2526 136.678 75.2526 137.161C75.2527 137.644 75.6445 138.035 76.1277 138.035H83.7119C84.1951 138.035 84.5869 137.644 84.587 137.161C84.587 136.678 84.1951 136.286 83.7119 136.286H76.1277ZM102.266 136.286C101.782 136.286 101.39 136.678 101.39 137.161C101.391 137.644 101.782 138.035 102.266 138.035H109.85C110.333 138.035 110.725 137.644 110.725 137.161C110.725 136.678 110.333 136.286 109.85 136.286H102.266ZM115.338 136.286C114.855 136.286 114.463 136.678 114.463 137.161C114.463 137.644 114.855 138.035 115.338 138.035H122.923C123.406 138.035 123.798 137.644 123.798 137.161C123.798 136.678 123.406 136.286 122.923 136.286H115.338ZM128.403 136.286C127.92 136.286 127.528 136.678 127.528 137.161C127.528 137.644 127.92 138.035 128.403 138.035H135.988C136.471 138.035 136.863 137.644 136.863 137.161C136.863 136.678 136.471 136.286 135.988 136.286H128.403ZM141.468 136.286C140.985 136.286 140.593 136.678 140.593 137.161C140.593 137.644 140.985 138.035 141.468 138.035H149.053C149.536 138.035 149.928 137.644 149.928 137.161C149.928 136.678 149.536 136.286 149.053 136.286H141.468ZM154.541 136.286C154.058 136.286 153.666 136.678 153.666 137.161C153.666 137.644 154.058 138.035 154.541 
138.035H162.125C162.609 138.035 163 137.644 163.001 137.161C163.001 136.678 162.609 136.286 162.125 136.286H154.541ZM167.614 136.286C167.131 136.286 166.739 136.678 166.739 137.161C166.739 137.644 167.131 138.035 167.614 138.035H175.198C175.681 138.035 176.073 137.644 176.073 137.161C176.073 136.678 175.681 136.286 175.198 136.286H167.614ZM180.671 136.286C180.188 136.286 179.796 136.678 179.796 137.161C179.796 137.644 180.188 138.035 180.671 138.035H188.255C188.739 138.035 189.13 137.644 189.131 137.161C189.131 136.678 188.739 136.286 188.255 136.286H180.671Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M444.85 38.2277C447.164 38.2277 449.04 40.1028 449.04 42.4159V132.928C449.04 135.241 447.164 137.116 444.85 137.116H255.36C253.046 137.116 251.17 135.241 251.17 132.928V42.4159C251.17 40.1028 253.046 38.2277 255.36 38.2277H444.85ZM361.96 125.388C361.618 125.046 361.064 125.046 360.722 125.388L354.534 131.572C354.192 131.914 354.192 132.468 354.534 132.81C354.876 133.151 355.43 133.151 355.772 132.81L361.96 126.624C362.301 126.283 362.301 125.73 361.96 125.388ZM371.047 116.311C370.705 115.969 370.15 115.969 369.809 116.311L364.446 121.671C364.104 122.012 364.104 122.567 364.446 122.908C364.788 123.249 365.342 123.25 365.684 122.908L371.047 117.548C371.388 117.207 371.388 116.652 371.047 116.311ZM380.124 107.246C379.782 106.904 379.227 106.904 378.885 107.246L373.523 112.606C373.181 112.948 373.181 113.502 373.523 113.844C373.864 114.185 374.419 114.185 374.761 113.844L380.124 108.483C380.465 108.142 380.465 107.587 380.124 107.246ZM385.736 65.8841C385.891 64.6727 384.622 63.7845 383.536 64.3434L371.069 70.7636C370.124 71.2504 369.96 72.5334 370.752 73.2424L381.2 82.5938C382.11 83.4081 383.561 82.8672 383.717 81.6557L384.393 76.3725C391.143 77.1933 398.567 80.7709 404.771 86.9711C411.124 93.3213 414.726 100.952 415.43 107.827C415.573 109.221 416.819 110.236 418.214 110.093C419.609 109.95 420.624 108.703 420.481 
107.309C419.644 99.1317 415.435 90.4514 408.362 83.3817C401.466 76.489 393.038 72.3185 385.038 71.338L385.736 65.8841ZM389.2 98.1733C388.859 97.8319 388.304 97.8318 387.962 98.1733L382.6 103.534C382.258 103.875 382.258 104.429 382.6 104.771C382.941 105.112 383.496 105.112 383.838 104.771L389.2 99.4108C389.542 99.0693 389.542 98.5149 389.2 98.1733ZM398.262 89.1047C397.92 88.7633 397.365 88.7632 397.024 89.1047L391.661 94.4649C391.319 94.8065 391.319 95.3608 391.661 95.7024C392.002 96.0436 392.557 96.0438 392.899 95.7024L398.262 90.3421C398.603 90.0007 398.603 89.4463 398.262 89.1047ZM416.431 70.9616C416.089 70.6202 415.534 70.6201 415.193 70.9616L409.83 76.3218C409.488 76.6634 409.488 77.2177 409.83 77.5592C410.172 77.9005 410.726 77.9007 411.068 77.5592L416.431 72.199C416.772 71.8575 416.772 71.3032 416.431 70.9616ZM425.508 61.891C425.166 61.5496 424.611 61.5495 424.27 61.891L418.907 67.2512C418.565 67.5928 418.565 68.1471 418.907 68.4887C419.249 68.8299 419.803 68.8301 420.145 68.4887L425.508 63.1284C425.849 62.787 425.849 62.2326 425.508 61.891ZM434.569 52.8146C434.227 52.4731 433.673 52.4731 433.331 52.8146L427.968 58.1748C427.626 58.5163 427.627 59.0706 427.968 59.4122C428.31 59.7534 428.864 59.7537 429.206 59.4122L434.569 54.052C434.91 53.7105 434.91 53.1562 434.569 52.8146ZM443.638 43.7479C443.296 43.4065 442.742 43.4064 442.4 43.7479L437.037 49.1081C436.695 49.4496 436.696 50.004 437.037 50.3455C437.379 50.6868 437.933 50.687 438.275 50.3455L443.638 44.9853C443.98 44.6438 443.979 44.0895 443.638 43.7479Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M684.066 38.2277C687.798 38.2281 689.667 42.7391 687.027 45.3773L596.473 135.889C595.687 136.675 594.621 137.116 593.51 137.116H506.335C504.021 137.116 502.145 135.241 502.145 132.928V42.4159C502.145 40.1028 504.021 38.2277 506.335 38.2277H684.066ZM514.603 124.566C514.261 124.224 513.707 124.224 513.365 124.566L507.178 130.751C506.836 131.093 506.836 131.646 
507.178 131.988C507.519 132.329 508.073 132.329 508.415 131.988L514.603 125.803C514.945 125.462 514.945 124.908 514.603 124.566ZM523.689 115.491C523.348 115.15 522.794 115.15 522.452 115.491L517.09 120.852C516.748 121.193 516.748 121.747 517.09 122.088C517.431 122.43 517.985 122.43 518.327 122.088L523.689 116.728C524.031 116.386 524.031 115.833 523.689 115.491ZM532.102 65.8295C530.707 65.6872 529.46 66.7017 529.318 68.0957C529.175 69.4896 530.189 70.7355 531.584 70.8787C538.463 71.5825 546.096 75.1826 552.45 81.5329C558.723 87.8037 562.312 95.3226 563.079 102.13L557.738 102.392C556.518 102.452 555.865 103.855 556.607 104.827L565.115 115.969C565.76 116.814 567.051 116.751 567.611 115.847L574.992 103.928C575.635 102.889 574.848 101.555 573.628 101.615L568.161 101.882C568.161 101.878 568.162 101.874 568.161 101.871C567.324 93.6931 563.114 85.0124 556.041 77.9425C548.968 70.873 540.283 66.6668 532.102 65.8295ZM532.766 106.421C532.425 106.079 531.871 106.079 531.529 106.421L526.166 111.781C525.825 112.123 525.825 112.676 526.166 113.018C526.508 113.359 527.062 113.359 527.403 113.018L532.766 107.657C533.108 107.316 533.108 106.762 532.766 106.421ZM541.843 97.3445C541.501 97.003 540.948 97.003 540.606 97.3445L535.243 102.705C534.901 103.046 534.902 103.6 535.243 103.941C535.585 104.283 536.139 104.283 536.48 103.941L541.843 98.5809C542.185 98.2393 542.185 97.686 541.843 97.3445ZM550.92 88.2778C550.578 87.9363 550.025 87.9363 549.683 88.2778L544.32 93.638C543.978 93.9796 543.978 94.5329 544.32 94.8745C544.662 95.2161 545.215 95.2161 545.557 94.8745L550.92 89.5142C551.262 89.1727 551.262 88.6193 550.92 88.2778ZM569.066 70.1405C568.724 69.799 568.17 69.7991 567.829 70.1405L562.466 75.5008C562.124 75.8423 562.124 76.3956 562.466 76.7372C562.808 77.0788 563.361 77.0788 563.703 76.7372L569.066 71.377C569.407 71.0354 569.407 70.4821 569.066 70.1405ZM578.143 61.0699C577.801 60.7284 577.247 60.7285 576.906 61.0699L571.543 66.4302C571.201 66.7717 571.201 67.3251 571.543 
67.6666C571.885 68.0082 572.438 68.0082 572.78 67.6666L578.143 62.3064C578.484 61.9648 578.484 61.4115 578.143 61.0699ZM587.219 51.9896C586.878 51.6481 586.324 51.6481 585.982 51.9896L580.62 57.3498C580.278 57.6914 580.278 58.2447 580.62 58.5863C580.961 58.9279 581.515 58.9279 581.857 58.5863L587.219 53.2261C587.561 52.8845 587.561 52.3312 587.219 51.9896ZM596.288 42.9249C595.947 42.5833 595.392 42.5833 595.05 42.9249L589.689 48.2851C589.347 48.6267 589.347 49.18 589.689 49.5216C590.03 49.863 590.584 49.8631 590.926 49.5216L596.288 44.1613C596.63 43.8198 596.63 43.2664 596.288 42.9249Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M850.814 38.2277C854.547 38.2281 856.416 42.739 853.777 45.3773L763.223 135.889C762.437 136.674 761.371 137.116 760.26 137.116H673.176C669.443 137.116 667.574 132.605 670.213 129.966L760.768 39.4544C761.554 38.6692 762.62 38.2277 763.731 38.2277H850.814ZM761.338 121.8C760.855 121.8 760.463 122.191 760.463 122.674V131.13H762.213V122.674C762.213 122.191 761.821 121.8 761.338 121.8ZM761.338 108.971C760.855 108.971 760.463 109.363 760.463 109.846V118.301H762.213V109.846C762.213 109.363 761.821 108.971 761.338 108.971ZM761.338 96.1402C760.855 96.1406 760.463 96.5321 760.463 97.0149V105.47H762.213V97.0149C762.213 96.532 761.821 96.1404 761.338 96.1402ZM782.263 71.887C781.043 71.951 780.395 73.3571 781.139 74.3257L784.474 78.6631C779.115 82.951 771.242 85.7443 762.35 85.7444C753.366 85.7442 745.421 82.8944 740.059 78.5305C738.972 77.6461 737.373 77.8099 736.488 78.8961C735.602 79.983 735.766 81.582 736.853 82.467C743.231 87.6574 752.348 90.8207 762.35 90.8209C772.209 90.8208 781.205 87.746 787.568 82.6884L790.833 86.9341C791.577 87.9025 793.103 87.6391 793.479 86.4767L797.791 73.138C798.118 72.127 797.33 71.1017 796.268 71.1566L782.263 71.887ZM761.338 70.4847C760.855 70.4851 760.463 70.8767 760.463 71.3594V79.8147H762.213V71.3594C762.213 70.8766 761.821 70.485 761.338 70.4847ZM761.338 
57.656C760.855 57.6564 760.463 58.048 760.463 58.5307V66.986H762.213V58.5307C762.213 58.0479 761.821 57.6563 761.338 57.656ZM761.338 44.8293C760.855 44.8297 760.463 45.2212 760.463 45.704V54.1592H762.213V45.704C762.213 45.2211 761.821 44.8295 761.338 44.8293Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M995.759 38.2277C999.53 38.228 1001.42 42.5171 998.752 45.0253L959.55 81.9005L905.796 41.5363C905.271 41.1418 904.662 41.0182 904.096 41.0994L997.485 130.319C1000.15 132.828 998.262 137.116 994.491 137.116H905.298C902.96 137.116 901.065 135.333 901.065 133.134V42.0941C901.065 42.0204 901.07 41.9483 901.079 41.8786C901.258 39.8345 903.079 38.2277 905.298 38.2277H995.759Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M505.873 0C506.657 4.57042e-05 507.307 0.195499 507.823 0.587023C508.338 0.969046 508.596 1.53802 508.596 2.29251C508.596 2.76034 508.467 3.19015 508.209 3.58162C507.951 3.96344 507.497 4.26401 506.848 4.48361V4.54114C507.65 4.67487 508.205 4.96191 508.51 5.4012C508.816 5.83087 508.969 6.31772 508.969 6.86193C508.969 7.74056 508.672 8.41851 508.08 8.89604C507.497 9.38304 506.733 9.62731 505.787 9.62738C504.861 9.62738 504.158 9.42172 503.68 9.0111C503.212 8.60054 502.935 8.08005 502.849 7.44993L503.881 7.10571L503.924 7.24028C504.035 7.54934 504.211 7.82925 504.454 8.07986C504.731 8.36635 505.166 8.50986 505.758 8.50989C506.465 8.50989 506.943 8.32772 507.191 7.9648C507.449 7.6019 507.579 7.20078 507.579 6.7615C507.579 6.2173 507.378 5.80683 506.977 5.52992C506.585 5.25295 505.93 5.10026 505.013 5.07161V4.15402C505.901 4.12537 506.489 3.92484 506.776 3.55237C507.062 3.18009 507.206 2.82242 507.206 2.47876C507.206 1.62801 506.752 1.17539 505.845 1.12237L505.658 1.11749C505.467 1.11752 505.242 1.14605 504.985 1.2033C504.736 1.25105 504.511 1.3274 504.31 1.43245L504.081 2.56457L503.05 2.44951L503.322 0.687461C503.666 0.49653 504.068 0.33454 504.526 0.200875C504.985 0.0671945 505.434 0 505.873 0Z\"\n    fill=\"currentColor\" />\n  
\u003Cpath\n    d=\"M905.727 2.30616L904.638 2.4066L904.466 1.26083H901.428V3.72497C901.533 3.71544 901.643 3.71034 901.757 3.71034H902.086C902.755 3.71034 903.386 3.78668 903.979 3.93949C904.58 4.09229 905.068 4.38363 905.44 4.8132C905.822 5.23335 906.014 5.84949 906.014 6.66106C906.014 7.64468 905.722 8.38068 905.14 8.86776C904.557 9.36434 903.783 9.6127 902.818 9.61275C901.91 9.61275 901.213 9.40711 900.725 8.99648C900.248 8.59544 899.96 8.08007 899.865 7.44993L900.911 7.10571C901.007 7.49723 901.203 7.8271 901.499 8.09449C901.795 8.37131 902.211 8.50985 902.746 8.50989C903.395 8.50989 903.869 8.33787 904.165 7.99405C904.461 7.65981 904.609 7.22507 904.609 6.69031C904.609 5.87861 904.337 5.3625 903.792 5.14279C903.248 4.91361 902.612 4.79958 901.886 4.79955C901.695 4.79955 901.489 4.80365 901.27 4.8132C901.059 4.82275 900.854 4.83701 900.653 4.85611L900.224 4.44071V0.143343H905.569L905.727 2.30616Z\"\n    fill=\"currentColor\" />\n  \u003Cpath fill-rule=\"evenodd\" clip-rule=\"evenodd\"\n    d=\"M765.49 6.04576H766.966L766.837 7.14862H765.49V9.48404H764.185V7.14862H759.857L759.713 6.04576L762.909 0.143343H765.49V6.04576ZM760.96 6.04576H764.185V1.26083H763.541L760.96 6.04576Z\"\n    fill=\"currentColor\" />\n  \u003Cpath d=\"M4.80573 6.47481H6.41154V7.60693H1.81068V6.47481H3.50235V1.27546H1.81068V0.143343H4.80573V6.47481Z\"\n    fill=\"currentColor\" />\n  \u003Cpath\n    d=\"M254.359 0C255.353 0 256.055 0.239186 256.466 0.716715C256.877 1.18447 257.083 1.68072 257.083 2.20573C257.083 2.85516 256.849 3.44346 256.38 3.96875C255.912 4.49397 255.348 4.96638 254.689 5.38657C254.039 5.79717 253.437 6.15968 252.883 6.47481H256.423L256.538 5.42948L257.599 5.51529L257.426 7.60693H251.407L251.292 6.58987C252.582 5.73032 253.638 4.98523 254.46 4.35489C255.281 3.71509 255.693 3.05632 255.693 2.37832C255.693 1.53787 255.166 1.11749 254.115 1.12237L254.115 1.11749C253.924 1.11754 253.695 1.14604 253.427 1.2033C253.16 1.25104 252.916 1.32238 252.697 1.41783L252.467 
2.47876L251.45 2.3637L251.707 0.60165C252.118 0.401088 252.563 0.253475 253.041 0.15797C253.519 0.0529708 253.958 1.99446e-05 254.359 0Z\"\n    fill=\"currentColor\" />\u003C/g>",{"tile":13,"orbsWithOnlyMarkdownPages":408},{"id":14,"data":15,"type":16,"maxContentLevel":19,"version":20,"orbs":21},"6444ca2e-c123-4189-a7d7-26bca51e8793",{"type":16,"title":17,"tagline":18},9,"Generative AI","How do computers create?",7,4,[22,160,217,313],{"id":23,"data":24,"type":25,"version":27,"maxContentLevel":28,"summaryPage":29,"introPage":37,"pages":44},"9011ca88-b15f-44ea-b709-326543c9b0e5",{"type":25,"title":26},2,"Large Language Models",1,3,{"id":30,"data":31,"type":28,"maxContentLevel":28,"version":27},"9bf46eb7-d453-4fab-a2eb-cd2ad207d5fd",{"type":28,"summary":32},[33,34,35,36],"Generative AI can create something new after learning from existing data","Large Language Models, like ChatGPT, specialize in natural language generation","These models are trained on massive amounts of data, typically taken from the internet","Large Language Models use transformer architecture to improve the quality of their outputs",{"id":38,"data":39,"type":40,"maxContentLevel":28,"version":27},"31a74e46-74b0-4ad8-8eb8-917e8431d310",{"type":40,"intro":41},10,[42,43],"How are Large Language Models trained to generate language?","Where do Large Language Models get their data from?",[45,63,87,113],{"id":46,"data":47,"type":27,"maxContentLevel":28,"version":27,"reviews":50},"897bd1b0-60eb-41cd-b8a9-0d37386b5ea1",{"type":27,"contentRole":25,"markdownContent":48,"audioMediaId":49},"In the last few years, **Generative AI** (or Gen AI, for short) has become a bit of a buzzword.\n\nIt's used to describe any AI model which can study a dataset, then use what it's learned to create something totally new. For example, it might study a thousand paintings, then generate a painting of its own. 
Or it might study a million bedtime stories, then write a new one completely from scratch.\n\nTo give you some context, this is totally different to something like a descriptive AI, which explains and classifies data. Where descriptive AI is designed to explain, generative AI is specifically designed to **create**.","96e31dc3-f986-4ef7-a321-cd9746c012b4",[51],{"id":52,"data":53,"type":54,"version":27,"maxContentLevel":28},"624bd74d-b491-4cc4-b4c4-c077d745b373",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":55,"multiChoiceCorrect":57,"multiChoiceIncorrect":59,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},11,[56],"Generative AI can study a dataset, then use what it's learned to:",[58],"Create something new",[60,61,62],"Describe the dataset","Categorize the dataset","Predict future trends",{"id":64,"data":65,"type":27,"maxContentLevel":28,"version":27,"reviews":68},"0ced03ac-51bd-4d3f-b942-eaa0c957f277",{"type":27,"contentRole":25,"markdownContent":66,"audioMediaId":67},"The most famous example of generative AI is ChatGPT, which exploded onto the global scene towards the end of 2022. It's an example of a particular subtype of AI, which is known as a **Large Language Model (LLM)**.\n\nWe mentioned LLMs last time. They're essentially just complex neural networks which specialize in natural language. As a general rule, we call it a 'Large' Language Model when it uses a billion parameters. The latest version of ChatGPT is said to have more than a trillion.\n\nThese vast neural networks are trained on a colossal volume of data, sometimes called a corpus. This corpus might be made up of news articles, text messages, song lyrics, novels, screenplays, textbooks, emails, poems...\n\nSome of the world's most powerful LLMs were literally trained using all the data on the internet.\n\n![Graph](image://89700d4f-675f-4c9b-8239-f9ddeb49b3c7 \"Data. 
Image via Pexels\")","d0964f67-9cef-4454-9118-fde5c748ea83",[69,80],{"id":70,"data":71,"type":54,"version":27,"maxContentLevel":28},"d1d2dc9d-8c34-446b-b87c-6ed8e2223421",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":72,"multiChoiceCorrect":74,"multiChoiceIncorrect":76,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[73],"As a general rule, a Large Language Model has how many parameters (or more)?",[75],"One billion",[77,78,79],"One thousand","One million","One trillion",{"id":81,"data":82,"type":54,"version":27,"maxContentLevel":28},"97f84338-fc75-4cc4-8984-6e1ee61e4edb",{"type":54,"reviewType":20,"spacingBehaviour":27,"clozeQuestion":83,"clozeWords":85},[84],"LLMs are trained on a vast amount of data, which is sometimes known as a corpus.",[86],"corpus",{"id":88,"data":89,"type":27,"maxContentLevel":28,"version":27,"reviews":92},"ec0f631c-2c0d-45f8-bd71-dbe4d666ee68",{"type":27,"contentRole":25,"markdownContent":90,"audioMediaId":91},"You might be wondering what 'GPT' stands for: Generative Pre-trained Transformer.\n\n'Generative' and 'pre-trained' are self-explanatory. But **transformer** is a term that you might not have seen before. It refers to a specific learning framework that helps to improve the quality of an LLM's outputs.\n\nWe won't get into all the details. But in simple terms, transformer architecture uses mathematical models to keep track of the overall context of a text, rather than just the last few words it has written.\n\nFor example, if you used an LLM to write an email, and the first sentence mentioned the recipient's name, the transformer architecture helps it remember this context, and keep using that name throughout the rest of the email.\n\nIt's worth pointing out that ChatGPT isn't the only transformer LLM on the market. Anthropic's Claude is another example. 
So is Google's Gemini.","e572a7c0-3059-4f15-a61d-ff10b0cd96c7",[93],{"id":94,"data":95,"type":54,"version":27,"maxContentLevel":28},"ba4e79ac-ecb6-4193-a7e9-dd48e8280af9",{"type":54,"reviewType":28,"spacingBehaviour":27,"collapsingSiblings":96,"multiChoiceQuestion":100,"multiChoiceCorrect":102,"multiChoiceIncorrect":104,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6,"matchPairsQuestion":108,"matchPairsPairs":110},[97,98,99],"9ee9668b-96fd-4dd0-9dcf-8f4ab8e40111","7e7ae376-1c24-4dfd-863b-9afbebc6a364","1cb45017-3c6a-430b-8344-a2c9984a91af",[101],"What type of LLM can keep track of the overall context of a text, rather than just the last few words it has written?",[103],"Transformer",[105,106,107],"Transceiver","Performer","Perceiver",[109],"Match the pairs below:",[111],{"left":103,"right":112,"direction":28},"LLM that keeps track of context",{"id":114,"data":115,"type":27,"maxContentLevel":28,"version":27,"reviews":118},"500ffc88-e175-4d53-8750-3b448f3b5583",{"type":27,"contentRole":25,"markdownContent":116,"audioMediaId":117},"It has to be said: even the world's best LLMs still struggle to compete with human writing in more expressive or artistic contexts. They lack the kind of empathy and emotion which the very best writing is based on.\n\nBut for simple writing, LLMs are extremely powerful tools. You can use them to write emails, or summarize articles, or rephrase complicated text. You can use them to draft to-do lists, or write computer code, or even just have a quick chat.\n\nInterestingly, the latest version of ChatGPT has also been trained to work with images and audio. 
In other words, it's no longer strictly an LLM – we can call this model **multimodal**, which basically means it can perform more than one task.","f70d310a-d9fa-4c60-b119-e9799fbba8a1",[119,133,144,153],{"id":120,"data":121,"type":54,"version":27,"maxContentLevel":28},"4c31dd78-37ff-473f-989e-83c888ebe3d9",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":122,"multiChoiceCorrect":124,"multiChoiceIncorrect":128,"multiChoiceMultiSelect":132,"multiChoiceRevealAnswerOption":6},[123],"Which of these are examples of transformer LLMs?",[125,126,127],"ChatGPT (OpenAI)","Claude (Anthropic)","Gemini (Google)",[129,130,131],"Stable Diffusion (Stability AI)","Midjourney (Midjourney Inc.)","DALL-E (OpenAI)",true,{"id":134,"data":135,"type":54,"version":27,"maxContentLevel":28},"f6a5c31a-483b-4352-9a58-4bc53302f4c7",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":136,"multiChoiceCorrect":138,"multiChoiceIncorrect":140,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[137],"What does GPT stand for?",[139],"Generative Pre-trained Transformer",[141,142,143],"Graphical Pattern Transformer","Generative Processing Tool","None of these",{"id":145,"data":146,"type":54,"version":27,"maxContentLevel":28},"521714f9-17eb-4d1d-b4a7-dc48d75b13a2",{"type":54,"reviewType":25,"spacingBehaviour":27,"binaryQuestion":147,"binaryCorrect":149,"binaryIncorrect":151},[148],"The latest version of ChatGPT has been trained to work with images and audio, as well as text. 
What do we call a model like this one?",[150],"Multimodal AI",[152],"General AI",{"id":154,"data":155,"type":54,"version":27,"maxContentLevel":28},"b04c2ed8-aa7a-4de4-a442-9e6c5a7e6777",{"type":54,"reviewType":27,"spacingBehaviour":27,"activeRecallQuestion":156,"activeRecallAnswers":158},[157],"Why might an LLM struggle to compete with human writing in more expressive or artistic contexts?",[159],"These models lack empathy and emotion",{"id":161,"data":162,"type":25,"version":25,"maxContentLevel":28,"summaryPage":164,"introPage":170,"pages":175},"f9b2f01b-d04c-4ac1-83b4-577e035db3e8",{"type":25,"title":163},"Case study: Turing Test",{"id":165,"data":166,"type":28,"maxContentLevel":28,"version":27},"09353fb5-0670-4152-a6fd-a25b27882e42",{"type":28,"summary":167},[168,169],"In a recent Turing Test study, ChatGPT was misidentified as a human 54% of the time","In another Turing Test study, Eliza was only misidentified 22% of the time",{"id":171,"data":172,"type":40,"maxContentLevel":28,"version":27},"308f6655-e382-42b6-808f-7f595a29916f",{"type":40,"intro":173},[174],"Can you tell the difference between ChatGPT and a human?",[176,181,186,191],{"id":177,"data":178,"type":27,"maxContentLevel":28,"version":27},"41862024-9713-42aa-ae9f-ec082c8de46c",{"type":27,"contentRole":25,"markdownContent":179,"audioMediaId":180},"Before moving on from LLMs, here's an interesting question. Is an AI model like ChatGPT capable of passing the Turing Test? In this orb, we'll find out firsthand.\n\nThere are a few variations of the Turing Test, but we'll be using one that looks like this. A machine and a human have a conversation. A second human (that's you!) reads the messages. At the end, you'll be asked a question: of the two participants, can you tell which one was a machine?\n\nThe conversation will start on the page after this one. The participants are labeled Person A and Person B. 
But just to repeat: only one is a real person.","f5dfac3a-691d-4a30-a7d6-a924e9d75c82",{"id":182,"data":183,"type":27,"maxContentLevel":28,"version":27},"3b554e91-97ad-4613-a074-cf13dafef9a1",{"type":27,"contentRole":25,"markdownContent":184,"audioMediaId":185},"**Person A**: \"Hey, quick question – if robots had favorite movies, what kind of films do you think they’d be into?\"\n\n**Person B**: \"Interesting. Honestly, I have no idea. Science fiction, maybe? They'd probably like seeing themselves on screen.\"\n\n**Person A**: \"Yeah, I can see that. Probably big fans of The Terminator or Wall-E, right? Imagine a robot getting super invested in robot characters.\"\n\n**Person B**: \"Ha, yeah. Like, deciding that the Terminator was the ideal role model. Or developing a crush on Wall-E.\"\n\n**Person A**: \"Exactly! A robot trying to act all cool like the Terminator... but then secretly fangirling over Wall-E’s personality. That would be hilarious.\"\n\n**Conversation continued on next page.**","382aaddd-18a3-4429-8f85-232d0452d779",{"id":187,"data":188,"type":27,"maxContentLevel":28,"version":27},"9ae2319a-dc54-4c08-b1e2-d56fc451145e",{"type":27,"contentRole":25,"markdownContent":189,"audioMediaId":190},"**Person B**: \"Yep, it sure would be! How about you, though? Do you agree that robots would like science fiction films, or is there another genre they might prefer?\"\n\n**Person A**: \"Hmm, I feel like robots might also get into documentaries, you know? Something all factual and logical, like ‘How It's Made.’ Maybe they’d enjoy learning about how the world works – or how they work.\"\n\n**Person B**: \"I like this. It's like a human watching a documentary about the digestive system. It's important to know how your body works.\"\n\n**Conversation end.**\n\nThere you have it. A conversation between a machine and a human. But which one is which? Take a moment to think. Make a guess in your mind. 
When you're ready to find out the actual answer, please continue to the page after this one.","34df030b-f3a3-429a-a99a-b24a7a8e7aba",{"id":192,"data":193,"type":27,"maxContentLevel":28,"version":25,"reviews":196},"476ba5da-53ba-4729-a2f3-fd0d58e43363",{"type":27,"contentRole":25,"markdownContent":194,"audioMediaId":195},"Here's the answer: Person A was actually ChatGPT, while person B was a real-life human.\n\nMaybe you guessed that correctly. Maybe you got it wrong. But either way, it probably wasn't easy. Would you have been confident enough to bet your savings on it?\n\nJust to be clear, none of this means that ChatGPT is sentient, or conscious, or an Artificial General Intelligence. It's still nothing more than an Artificial Narrow Intelligence using a neural network to mimic natural language.\n\nBut it does this very effectively. In one recent study, using a similar test, GPT was misidentified as a human 54% of the time. Compare that to something like the Eliza chatbot, way back in the 1960s. 
In the same test, it was only misidentified 22% of the time.","9d9aeb87-3567-4688-9aad-f62917d01512",[197,208],{"id":198,"data":199,"type":54,"version":25,"maxContentLevel":28},"21ebd094-6e73-4bc9-8210-7694be22a0e0",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":200,"multiChoiceCorrect":202,"multiChoiceIncorrect":204,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[201],"In a recent Turing Test study, how often was ChatGPT misidentified as a human?",[203],"54%",[205,206,207],"22%","45%","60%",{"id":209,"data":210,"type":54,"version":25,"maxContentLevel":28},"badc9a04-c019-45d7-b439-87f5b72c8609",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":211,"multiChoiceCorrect":213,"multiChoiceIncorrect":214,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[212],"In a 1960s Turing Test study, how often was the Eliza chatbot misidentified as a human?",[205],[203,215,216],"30%","40%",{"id":218,"data":219,"type":25,"version":20,"maxContentLevel":19,"summaryPage":221,"introPage":229,"pages":235},"a15d06f6-c5bf-46bc-a5d3-d002cbc46fb1",{"type":25,"title":220},"Text-to-Image models",{"id":222,"data":223,"type":28,"maxContentLevel":28,"version":27},"ed0d5a90-7cd4-4801-8259-027858d546b6",{"type":28,"summary":224},[225,226,227,228],"Text-to-Image models can generate images from text prompts","Generative Adversarial Networks are a popular way to train Text-to-Image models","Instead of 'seeing' images, these models process images as patterns of numbers","They can be used to generate digital paintings, product blueprints, and more",{"id":230,"data":231,"type":40,"maxContentLevel":28,"version":27},"91658a78-52b4-4c67-b91a-91a142e8a177",{"type":40,"intro":232},[233,234],"How are Text-to-Image models trained to generate images?","Can these models actually 'see' the images they're 
generating?",[236,249,277,282,308],{"id":237,"data":238,"type":27,"maxContentLevel":28,"version":27,"reviews":241},"ddbea2bd-4eb3-4a0d-9c5c-7e62d0d9bfc1",{"type":27,"contentRole":25,"markdownContent":239,"audioMediaId":240},"Large Language Models, like ChatGPT, are probably the most striking and well known type of modern generative AI. But not far behind is another type: **Text-to-Image models**.\n\nYou've probably heard of at least one of them. Midjourney, DALL-E, Stable Diffusion...\n\nThese models can take a piece of text (a prompt) and use it to generate a picture. For example, if you prompted it to generate \"a picture of a purple mountain,\" that's exactly what it would do.\n\n![Graph](image://7d7e7735-3884-44aa-be57-43e8c3afca68 \"A picture of a purple mountain.\")\n\nJust like those Large Language Models we looked at last time, these text-to-image models are extremely powerful and complex. The neural network behind the DALL-E model uses several billion parameters.","12742a18-7f6c-4c87-a962-d2361e0d2951",[242],{"id":243,"data":244,"type":54,"version":27,"maxContentLevel":28},"e15171c9-cffa-4a8d-8d07-92dbed3eba88",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":245,"multiChoiceCorrect":247,"multiChoiceIncorrect":248,"multiChoiceMultiSelect":132,"multiChoiceRevealAnswerOption":6},[246],"Which of these are examples of Text-to-Image models?",[129,130,131],[125,126,127],{"id":250,"data":251,"type":27,"maxContentLevel":19,"version":20,"reviews":254},"1580afff-9794-4c79-af6a-72fdc279b6c3",{"type":27,"contentRole":25,"markdownContent":252,"audioMediaId":253},"Training a Text-to-Image model will generally involve a massive dataset of images with descriptive captions.\n\nThis is a great example of **supervised learning**: training a model using a labeled set of input-output pairs. 
The descriptive caption is the input, while the image is the output.\n\nThis approach allows the model to learn the relationship between words (\"a ginger cat beside a book on a windowsill\") and visuals (the image below).\n\n![Graph](image://2e8357de-d3be-4c97-9072-52712b73423a \"A ginger cat beside a book on a windowsill.\")\n\nIt's worth pointing out that a dataset like this isn't easy to put together. One example is Microsoft's Common Objects in Context (COCO), which features more than 100,000 different images, each with five descriptive captions. All of these captions had to be manually written by a team of human annotators.","81493ee2-53c7-4ccb-b062-7e3866724a08",[255,264],{"id":256,"data":257,"type":54,"version":27,"maxContentLevel":28},"0330f9f3-29a9-47ed-80d7-c7765e0dd0d5",{"type":54,"reviewType":25,"spacingBehaviour":27,"binaryQuestion":258,"binaryCorrect":260,"binaryIncorrect":262},[259],"Text-to-Image models are typically trained using a labelled dataset of input-output pairs. 
What are these pairs?",[261],"Descriptive caption (input), visual image (output)",[263],"Visual image (input), descriptive caption (output)",{"id":265,"data":266,"type":54,"version":20,"maxContentLevel":19},"46ae3603-f6ce-4c73-afe5-9f69dce492d5",{"type":54,"reviewType":28,"evolvingBehavior":28,"spacingBehaviour":27,"multiChoiceQuestion":267,"multiChoiceCorrect":269,"multiChoiceIncorrect":273,"multiChoiceMultiSelect":132,"multiChoiceRevealAnswerOption":6},[268],"Which of these statements are true of Microsoft's Common Objects in Context dataset?",[270,271,272],"Contains 100,000 images","Every image has 5 captions","The captions were written manually",[274,275,276],"Contains 10,000 images","Every image has 10 captions","The captions were AI generated",{"id":278,"data":279,"type":27,"maxContentLevel":28,"version":27},"6638c6c4-502c-4240-bce9-b0e3c5b285e0",{"type":27,"contentRole":25,"markdownContent":280,"audioMediaId":281},"Text-to-Image models can also be built using something called a **Generative Adversarial Network (GAN)**.\n\nThis approach involves a pair of neural networks which are designed to work against each other. The first neural network (the Generator) produces AI generated images. The second network (the Discriminator) looks at these images, along with some real-life images, and tries to guess which ones are which.\n\nIt's almost as though the two networks are playing a game. If the Generator manages to fool the Discriminator, it gets positive feedback. If it fails, the Discriminator gets positive feedback. Over time, both networks get better and better at generating and detecting AI generated images.\n\nThe GAN approach isn't only used for Text-to-Image models. It can also be used to generate text. 
But it seems to work especially well for images.","02f108fd-e74c-4f90-bfcd-08f2a955dcd9",{"id":283,"data":284,"type":27,"maxContentLevel":28,"version":27,"reviews":287},"4834558b-cbbf-4bc3-9a2f-bfd0b982014e",{"type":27,"contentRole":25,"markdownContent":285,"audioMediaId":286},"Now, it's important to remember that when a Text-to-Image model learns to generate images, it's not actually 'aware' of what those images look like. At least, not in the same way as a human would be if they were drawing, or taking a photo.\n\nInstead, every image is converted into some kind of numerical structure which the computer is able to work with. There are different ways to go about this, but one simple example would be a map of pixels, each with a number that describes that pixel's color.\n\n![Graph](image://3a15386b-e22b-47bb-960c-5f8f9089ff0d \"Simplified map of pixels.\")\n\nIn other words, what's happening behind the scenes of a Text-to-Image model is similar to an LLM. An image is treated as a complex pattern of numbers, exactly the same as an LLM might treat a piece of text.","ea17fe75-0977-4e32-96bf-933b7fa990ca",[288,295],{"id":289,"data":290,"type":54,"version":27,"maxContentLevel":28},"e5909c81-a2a0-4be3-89fb-fa8fd1b250fb",{"type":54,"reviewType":20,"spacingBehaviour":27,"clozeQuestion":291,"clozeWords":293},[292],"An AI can treat a digital image as a numerical structure, because every pixel uses a number to describe its color.",[294],"pixel",{"id":296,"data":297,"type":54,"version":27,"maxContentLevel":28},"179dafc9-67c0-487c-9f5f-8057a24d068e",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":298,"multiChoiceCorrect":300,"multiChoiceIncorrect":304,"multiChoiceMultiSelect":132,"multiChoiceRevealAnswerOption":6},[299],"Which of these statements are true of a Generative Adversarial Network (GAN)?",[301,302,303],"The Generator produces AI images","The Discriminator guesses if those images are real","If the Generator fools the Discriminator, the Generator 
gets positive feedback",[305,306,307],"The Discriminator produces AI images","The Generator guesses if those images are real","If the Discriminator fools the Generator, the Generator gets positive feedback",{"id":309,"data":310,"type":27,"maxContentLevel":28,"version":27},"b814488c-9dee-4032-b801-1163055c7e50",{"type":27,"contentRole":25,"markdownContent":311,"audioMediaId":312},"The applications of Text-to-Image models are extremely vast and varied.\n\nJust like text generation from an LLM, these images lack the emotional depth and conceptual complexity that you would find in human-made artwork. But if you're willing to overlook that fact...\n\nWith the right choice of prompt, you can use them to generate digital paintings, product blueprints, marketing materials, greetings cards, fashion designs, game assets, memes, and more.\n\n![Graph](image://3713cf3e-0914-4b75-8be0-f8cc99e39e2d \"An octopus loading up Kinnu?\")\n\nWork is even being done to harness these models in medical settings. 
In the future, a surgeon could potentially describe a patient's symptoms to one of these models, and receive a diagram which helps them to plan the surgery.","39f4f39f-11e8-4a3f-b85d-79528e23a586",{"id":314,"data":315,"type":25,"version":27,"maxContentLevel":28,"summaryPage":317,"introPage":325,"pages":331},"5eb155ad-8d95-4df7-9290-1c57393873de",{"type":25,"title":316},"Other generative AI",{"id":318,"data":319,"type":28,"maxContentLevel":28,"version":27},"31fdcc60-2ff6-4a32-9b35-3b64897b49a6",{"type":28,"summary":320},[321,322,323,324],"Text-to-Speech models turn written text into spoken words","Text-to-Video models can generate moving visual content","Text-to-Music models can generate songs and melodies","The legality and ethics of training generative AI is uncertain",{"id":326,"data":327,"type":40,"maxContentLevel":28,"version":27},"1209dae7-c7d1-4054-bd6d-c5fbac9aa89a",{"type":40,"intro":328},[329,330],"What are the other main types of generative AI?","Why are some of these models being taken to court?",[332,345,362,367],{"id":333,"data":334,"type":27,"maxContentLevel":28,"version":27,"reviews":337},"2d5b26d8-e44c-4d1e-9d45-f1a0bbbcdcfa",{"type":27,"contentRole":25,"markdownContent":335,"audioMediaId":336},"Generative AI isn't only used for text and image generation. There are also lots of other models out there, which can produce a range of content, including speech, videos and music.\n\n**Text-to-Speech models** are able to turn written text into human-like spoken words. They're used all over the place, from voice assistants, to audiobooks, to tools for visually impaired people.\n\nThere are lots of different ways to build these models. But they're generally based on deep learning. If you train a model on thousands of written texts, plus audio files of people reading those texts, it can learn to sound like a real person talking.\n\nAs always, these models aren't aware that they're 'talking'. 
Just like LLMs, and Text-to-Image models, they treat their output (audio) as a piece of numerical data.\n\n![Graph](image://86ac7ff7-94c2-4b9c-aede-1fb0f74e0d7a \"Audio data. Image via Pexels\")","25d9050f-60cd-4cde-a27d-4cacde24e96a",[338],{"id":339,"data":340,"type":54,"version":27,"maxContentLevel":28},"1209c5ef-ceba-4c0d-88f0-481caa6ccd33",{"type":54,"reviewType":20,"spacingBehaviour":27,"clozeQuestion":341,"clozeWords":343},[342],"Text-to-Speech models turn written text into human-like spoken words.",[344],"Text-to-Speech",{"id":346,"data":347,"type":27,"maxContentLevel":28,"version":27,"reviews":350},"5e3a611c-5e8c-4a05-8d9c-65f8adade654",{"type":27,"contentRole":25,"markdownContent":348,"audioMediaId":349},"**Text-to-Video models** can generate moving visual content after receiving a written prompt. They're essentially just a more advanced and complicated version of a Text-to-Image model, maybe with some Text-to-Speech thrown in if the video is meant to have audio.\n\nThis particular type of generative AI isn't quite as popular as the others. That's mainly because it lacks consistency: even the best models have a tendency to produce slightly strange, uncanny videos.\n\nBut that could potentially change in the next few years, as more advanced models, trained on better datasets, are developed. 
If that does happen, it could have a major impact on fields like entertainment and advertising.","93c2738d-e6b0-4f41-b0bb-25f24555729d",[351],{"id":352,"data":353,"type":54,"version":27,"maxContentLevel":28},"e1a9af17-2f35-4178-b147-dec7cef9ac9b",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":354,"multiChoiceCorrect":356,"multiChoiceIncorrect":358,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[355],"Why might current Text-to-Video models be less popular than other Gen AI?",[357],"Their outputs are often uncanny and inconsistent",[359,360,361],"They need more training data than other models","They often lack user-friendly interfaces","They are illegal in a number of countries",{"id":363,"data":364,"type":27,"maxContentLevel":28,"version":27},"38f88fa5-ba54-4817-8857-752fd1fd0f62",{"type":27,"contentRole":25,"markdownContent":365,"audioMediaId":366},"**Text-to-Music models** are one more type of modern generative AI. They've been trained to generate pieces of music by looking at the patterns and structures of existing compositions.\n\n![Graph](image://07bd2135-c77a-4c32-be85-19063c19fa43 \"Music. Image via Pexels\")\n\nFor example, you could input a prompt like \"generate a calm, relaxing melody,\" and the model would generate a piece of music accordingly.\n\nOne of the most well-known examples of this is a model called Suno AI. But Suno is pretty controversial. In June 2024, it was actually sued by a group of record labels, including Universal, Sony, and Warner Music.\n\nWhy? Because Suno was supposedly trained on a dataset which featured songs from famous artists like Mariah Carey, Green Day, and Michael Jackson. 
According to the record labels, Suno's use of these songs was breaking copyright laws.","7782d1a5-ebe2-4ba4-823d-749f23b1ba38",{"id":368,"data":369,"type":27,"maxContentLevel":28,"version":27,"reviews":372},"886563d2-050b-4672-9c8f-47592adf43e6",{"type":27,"contentRole":25,"markdownContent":370,"audioMediaId":371},"Suno isn't the only AI with a dataset that's come under scrutiny.\n\nA number of artists have also been trying to take Text-to-Image models (like Midjourney) to court, after claiming that these models were trained on their art without getting any prior permission.\n\nIn 2023, a group of authors including George RR Martin, John Grisham, and Jodi Picoult, made similar claims against ChatGPT.\n\nAs things stand, the use of copyrighted content in AI training is still a bit of a gray area. There isn't just the legal question to think about, but the ethical question too. Is it fair to use a creator's work without asking for prior permission?\n\nFor now, let's put a pin in that question. 
We'll be taking a proper look at AI ethics later in the pathway.","358b38f6-6e3f-435c-b158-8609806ad677",[373,392,399],{"id":374,"data":375,"type":54,"version":27,"maxContentLevel":28},"42661e37-7495-436b-a519-6b976d7ba479",{"type":54,"reviewType":28,"spacingBehaviour":27,"collapsingSiblings":376,"multiChoiceQuestion":380,"multiChoiceCorrect":382,"multiChoiceIncorrect":384,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6,"matchPairsQuestion":388,"matchPairsPairs":389},[377,378,379],"37029b16-f9d4-43c1-bac7-30c9382ce308","14a4785e-81f2-43b7-b271-ba31052cd5c3","dae72ea9-487b-4523-84d4-fe9d3e7511be",[381],"The company behind which AI model was taken to court by a group of authors in 2023?",[383],"ChatGPT",[385,386,387],"Midjourney","Stable Diffusion","Claude",[109],[390],{"left":383,"right":391,"direction":28},"Taken to court by authors",{"id":393,"data":394,"type":54,"version":27,"maxContentLevel":28},"94083e4e-4b30-4d3d-9117-15264bf39bfc",{"type":54,"reviewType":27,"spacingBehaviour":27,"activeRecallQuestion":395,"activeRecallAnswers":397},[396],"Why might a creator want to take a Gen AI company to court?",[398],"Their content was used to train an AI without permission",{"id":400,"data":401,"type":54,"version":27,"maxContentLevel":28},"b35d7bcb-4131-4c1d-a902-e6a6fec13fe4",{"type":54,"reviewType":28,"spacingBehaviour":27,"multiChoiceQuestion":402,"multiChoiceCorrect":404,"multiChoiceIncorrect":406,"multiChoiceMultiSelect":6,"multiChoiceRevealAnswerOption":6},[403],"The company behind which AI model was taken to court by a group of record labels in 2024?",[405],"Suno AI",[407,386,387],"Gemini",[409,565,727,911],{"id":23,"data":24,"type":25,"version":27,"maxContentLevel":28,"summaryPage":29,"introPage":37,"pages":410},[411,453,497,536],{"id":46,"data":47,"type":27,"maxContentLevel":28,"version":27,"reviews":50,"parsed":412},{"data":413,"body":416,"toc":451},{"title":414,"description":415},"","In the last few years, Generative AI (or Gen AI, for short) has 
become a bit of a buzzword.",{"type":417,"children":418},"root",[419,434,439],{"type":420,"tag":421,"props":422,"children":423},"element","p",{},[424,427,432],{"type":425,"value":426},"text","In the last few years, ",{"type":420,"tag":428,"props":429,"children":430},"strong",{},[431],{"type":425,"value":17},{"type":425,"value":433}," (or Gen AI, for short) has become a bit of a buzzword.",{"type":420,"tag":421,"props":435,"children":436},{},[437],{"type":425,"value":438},"It's used to describe any AI model which can study a dataset, then use what it's learned to create something totally new. For example, it might study a thousand paintings, then generate a painting of its own. Or it might study a million bedtime stories, then write a new one completely from scratch.",{"type":420,"tag":421,"props":440,"children":441},{},[442,444,449],{"type":425,"value":443},"To give you some context, this is totally different to something like a descriptive AI, which explains and classifies data. Where descriptive AI is designed to explain, generative AI is specifically designed to ",{"type":420,"tag":428,"props":445,"children":446},{},[447],{"type":425,"value":448},"create",{"type":425,"value":450},".",{"title":414,"searchDepth":25,"depth":25,"links":452},[],{"id":64,"data":65,"type":27,"maxContentLevel":28,"version":27,"reviews":68,"parsed":454},{"data":455,"body":457,"toc":495},{"title":414,"description":456},"The most famous example of generative AI is ChatGPT, which exploded onto the global scene towards the end of 2022. It's an example of a particular subtype of AI, which is known as a Large Language Model (LLM).",{"type":417,"children":458},[459,470,475,480,485],{"type":420,"tag":421,"props":460,"children":461},{},[462,464,469],{"type":425,"value":463},"The most famous example of generative AI is ChatGPT, which exploded onto the global scene towards the end of 2022. 
It's an example of a particular subtype of AI, which is known as a ",{"type":420,"tag":428,"props":465,"children":466},{},[467],{"type":425,"value":468},"Large Language Model (LLM)",{"type":425,"value":450},{"type":420,"tag":421,"props":471,"children":472},{},[473],{"type":425,"value":474},"We mentioned LLMs last time. They're essentially just complex neural networks which specialize in natural language. As a general rule, we call it a 'Large' Language Model when it uses a billion parameters. The latest version of ChatGPT is said to have more than a trillion.",{"type":420,"tag":421,"props":476,"children":477},{},[478],{"type":425,"value":479},"These vast neural networks are trained on a colossal volume of data, sometimes called a corpus. This corpus might be made up of news articles, text messages, song lyrics, novels, screenplays, textbooks, emails, poems...",{"type":420,"tag":421,"props":481,"children":482},{},[483],{"type":425,"value":484},"Some of the world's most powerful LLMs were literally trained using all the data on the internet.",{"type":420,"tag":421,"props":486,"children":487},{},[488],{"type":420,"tag":489,"props":490,"children":494},"img",{"alt":491,"src":492,"title":493},"Graph","image://89700d4f-675f-4c9b-8239-f9ddeb49b3c7","Data. Image via Pexels",[],{"title":414,"searchDepth":25,"depth":25,"links":496},[],{"id":88,"data":89,"type":27,"maxContentLevel":28,"version":27,"reviews":92,"parsed":498},{"data":499,"body":501,"toc":534},{"title":414,"description":500},"You might be wondering what 'GPT' stands for: Generative Pre-trained Transformer.",{"type":417,"children":502},[503,507,519,524,529],{"type":420,"tag":421,"props":504,"children":505},{},[506],{"type":425,"value":500},{"type":420,"tag":421,"props":508,"children":509},{},[510,512,517],{"type":425,"value":511},"'Generative' and 'pre-trained' are self-explanatory. 
But ",{"type":420,"tag":428,"props":513,"children":514},{},[515],{"type":425,"value":516},"transformer",{"type":425,"value":518}," is a term that you might not have seen before. It refers to a specific learning framework that helps to improve the quality of an LLM's outputs.",{"type":420,"tag":421,"props":520,"children":521},{},[522],{"type":425,"value":523},"We won't get into all the details. But in simple terms, transformer architecture uses mathematical models to keep track of the overall context of a text, rather than just the last few words it has written.",{"type":420,"tag":421,"props":525,"children":526},{},[527],{"type":425,"value":528},"For example, if you used an LLM to write an email, and the first sentence mentioned the recipient's name, the transformer architecture helps it remember this context, and keep using that name throughout the rest of the email.",{"type":420,"tag":421,"props":530,"children":531},{},[532],{"type":425,"value":533},"It's worth pointing out that ChatGPT isn't the only transformer LLM on the market. Anthropic's Claude is another example. So is Google's Gemini.",{"title":414,"searchDepth":25,"depth":25,"links":535},[],{"id":114,"data":115,"type":27,"maxContentLevel":28,"version":27,"reviews":118,"parsed":537},{"data":538,"body":540,"toc":563},{"title":414,"description":539},"It has to be said: even the world's best LLMs still struggle to compete with human writing in more expressive or artistic contexts. They lack the kind of empathy and emotion which the very best writing is based on.",{"type":417,"children":541},[542,546,551],{"type":420,"tag":421,"props":543,"children":544},{},[545],{"type":425,"value":539},{"type":420,"tag":421,"props":547,"children":548},{},[549],{"type":425,"value":550},"But for simple writing, LLMs are extremely powerful tools. You can use them to write emails, or summarize articles, or rephrase complicated text. 
You can use them to draft to-do lists, or write computer code, or even just have a quick chat.",{"type":420,"tag":421,"props":552,"children":553},{},[554,556,561],{"type":425,"value":555},"Interestingly, the latest version of ChatGPT has also been trained to work with images and audio. In other words, it's no longer strictly an LLM – we can call this model ",{"type":420,"tag":428,"props":557,"children":558},{},[559],{"type":425,"value":560},"multimodal",{"type":425,"value":562},", which basically means it can perform more than one task.",{"title":414,"searchDepth":25,"depth":25,"links":564},[],{"id":161,"data":162,"type":25,"version":25,"maxContentLevel":28,"summaryPage":164,"introPage":170,"pages":566},[567,589,652,700],{"id":177,"data":178,"type":27,"maxContentLevel":28,"version":27,"parsed":568},{"data":569,"body":571,"toc":587},{"title":414,"description":570},"Before moving on from LLMs, here's an interesting question. Is an AI model like ChatGPT capable of passing the Turing Test? In this orb, we'll find out firsthand.",{"type":417,"children":572},[573,577,582],{"type":420,"tag":421,"props":574,"children":575},{},[576],{"type":425,"value":570},{"type":420,"tag":421,"props":578,"children":579},{},[580],{"type":425,"value":581},"There are a few variations of the Turing Test, but we'll be using one that looks like this. A machine and a human have a conversation. A second human (that's you!) reads the messages. At the end, you'll be asked a question: of the two participants, can you tell which one was a machine?",{"type":420,"tag":421,"props":583,"children":584},{},[585],{"type":425,"value":586},"The conversation will start on the page after this one. The participants are labeled Person A and Person B. 
But just to repeat: only one is a real person.",{"title":414,"searchDepth":25,"depth":25,"links":588},[],{"id":182,"data":183,"type":27,"maxContentLevel":28,"version":27,"parsed":590},{"data":591,"body":593,"toc":650},{"title":414,"description":592},"Person A: \"Hey, quick question – if robots had favorite movies, what kind of films do you think they’d be into?\"",{"type":417,"children":594},[595,605,615,624,633,642],{"type":420,"tag":421,"props":596,"children":597},{},[598,603],{"type":420,"tag":428,"props":599,"children":600},{},[601],{"type":425,"value":602},"Person A",{"type":425,"value":604},": \"Hey, quick question – if robots had favorite movies, what kind of films do you think they’d be into?\"",{"type":420,"tag":421,"props":606,"children":607},{},[608,613],{"type":420,"tag":428,"props":609,"children":610},{},[611],{"type":425,"value":612},"Person B",{"type":425,"value":614},": \"Interesting. Honestly, I have no idea. Science fiction, maybe? They'd probably like seeing themselves on screen.\"",{"type":420,"tag":421,"props":616,"children":617},{},[618,622],{"type":420,"tag":428,"props":619,"children":620},{},[621],{"type":425,"value":602},{"type":425,"value":623},": \"Yeah, I can see that. Probably big fans of The Terminator or Wall-E, right? Imagine a robot getting super invested in robot characters.\"",{"type":420,"tag":421,"props":625,"children":626},{},[627,631],{"type":420,"tag":428,"props":628,"children":629},{},[630],{"type":425,"value":612},{"type":425,"value":632},": \"Ha, yeah. Like, deciding that the Terminator was the ideal role model. Or developing a crush on Wall-E.\"",{"type":420,"tag":421,"props":634,"children":635},{},[636,640],{"type":420,"tag":428,"props":637,"children":638},{},[639],{"type":425,"value":602},{"type":425,"value":641},": \"Exactly! A robot trying to act all cool like the Terminator... but then secretly fangirling over Wall-E’s personality. 
That would be hilarious.\"",{"type":420,"tag":421,"props":643,"children":644},{},[645],{"type":420,"tag":428,"props":646,"children":647},{},[648],{"type":425,"value":649},"Conversation continued on next page.",{"title":414,"searchDepth":25,"depth":25,"links":651},[],{"id":187,"data":188,"type":27,"maxContentLevel":28,"version":27,"parsed":653},{"data":654,"body":656,"toc":698},{"title":414,"description":655},"Person B: \"Yep, it sure would be! How about you, though? Do you agree that robots would like science fiction films, or is there another genre they might prefer?\"",{"type":417,"children":657},[658,667,676,685,693],{"type":420,"tag":421,"props":659,"children":660},{},[661,665],{"type":420,"tag":428,"props":662,"children":663},{},[664],{"type":425,"value":612},{"type":425,"value":666},": \"Yep, it sure would be! How about you, though? Do you agree that robots would like science fiction films, or is there another genre they might prefer?\"",{"type":420,"tag":421,"props":668,"children":669},{},[670,674],{"type":420,"tag":428,"props":671,"children":672},{},[673],{"type":425,"value":602},{"type":425,"value":675},": \"Hmm, I feel like robots might also get into documentaries, you know? Something all factual and logical, like ‘How It's Made.’ Maybe they’d enjoy learning about how the world works – or how they work.\"",{"type":420,"tag":421,"props":677,"children":678},{},[679,683],{"type":420,"tag":428,"props":680,"children":681},{},[682],{"type":425,"value":612},{"type":425,"value":684},": \"I like this. It's like a human watching a documentary about the digestive system. It's important to know how your body works.\"",{"type":420,"tag":421,"props":686,"children":687},{},[688],{"type":420,"tag":428,"props":689,"children":690},{},[691],{"type":425,"value":692},"Conversation end.",{"type":420,"tag":421,"props":694,"children":695},{},[696],{"type":425,"value":697},"There you have it. A conversation between a machine and a human. But which one is which? 
Take a moment to think. Make a guess in your mind. When you're ready to find out the actual answer, please continue to the page after this one.",{"title":414,"searchDepth":25,"depth":25,"links":699},[],{"id":192,"data":193,"type":27,"maxContentLevel":28,"version":25,"reviews":196,"parsed":701},{"data":702,"body":704,"toc":725},{"title":414,"description":703},"Here's the answer: Person A was actually ChatGPT, while person B was a real-life human.",{"type":417,"children":705},[706,710,715,720],{"type":420,"tag":421,"props":707,"children":708},{},[709],{"type":425,"value":703},{"type":420,"tag":421,"props":711,"children":712},{},[713],{"type":425,"value":714},"Maybe you guessed that correctly. Maybe you got it wrong. But either way, it probably wasn't easy. Would you have been confident enough to bet your savings on it?",{"type":420,"tag":421,"props":716,"children":717},{},[718],{"type":425,"value":719},"Just to be clear, none of this means that ChatGPT is sentient, or conscious, or an Artificial General Intelligence. It's still nothing more than an Artificial Narrow Intelligence using a neural network to mimic natural language.",{"type":420,"tag":421,"props":721,"children":722},{},[723],{"type":425,"value":724},"But it does this very effectively. In one recent study, using a similar test, GPT was misidentified as a human 54% of the time. Compare that to something like the Eliza chatbot, way back in the 1960s. In the same test, it was only misidentified 22% of the time.",{"title":414,"searchDepth":25,"depth":25,"links":726},[],{"id":218,"data":219,"type":25,"version":20,"maxContentLevel":19,"summaryPage":221,"introPage":229,"pages":728},[729,770,812,846,876],{"id":237,"data":238,"type":27,"maxContentLevel":28,"version":27,"reviews":241,"parsed":730},{"data":731,"body":733,"toc":768},{"title":414,"description":732},"Large Language Models, like ChatGPT, are probably the most striking and well known type of modern generative AI. 
But not far behind is another type: Text-to-Image models.",{"type":417,"children":734},[735,745,750,755,763],{"type":420,"tag":421,"props":736,"children":737},{},[738,740,744],{"type":425,"value":739},"Large Language Models, like ChatGPT, are probably the most striking and well known type of modern generative AI. But not far behind is another type: ",{"type":420,"tag":428,"props":741,"children":742},{},[743],{"type":425,"value":220},{"type":425,"value":450},{"type":420,"tag":421,"props":746,"children":747},{},[748],{"type":425,"value":749},"You've probably heard of at least one of them. Midjourney, DALL-E, Stable Diffusion...",{"type":420,"tag":421,"props":751,"children":752},{},[753],{"type":425,"value":754},"These models can take a piece of text (a prompt) and use it to generate a picture. For example, if you prompted it to generate \"a picture of purple mountain,\" that's exactly what it would do.",{"type":420,"tag":421,"props":756,"children":757},{},[758],{"type":420,"tag":489,"props":759,"children":762},{"alt":491,"src":760,"title":761},"image://7d7e7735-3884-44aa-be57-43e8c3afca68","A picture of a purple mountain.",[],{"type":420,"tag":421,"props":764,"children":765},{},[766],{"type":425,"value":767},"Just like those Large Language Models we looked at last time, these text-to-image models are extremely powerful and complex. 
The neural network behind the DALL-E model uses several billion parameters.",{"title":414,"searchDepth":25,"depth":25,"links":769},[],{"id":250,"data":251,"type":27,"maxContentLevel":19,"version":20,"reviews":254,"parsed":771},{"data":772,"body":774,"toc":810},{"title":414,"description":773},"Training a Text-to-Image model will generally involve a massive dataset of images with descriptive captions.",{"type":417,"children":775},[776,780,792,797,805],{"type":420,"tag":421,"props":777,"children":778},{},[779],{"type":425,"value":773},{"type":420,"tag":421,"props":781,"children":782},{},[783,785,790],{"type":425,"value":784},"This is a great example of ",{"type":420,"tag":428,"props":786,"children":787},{},[788],{"type":425,"value":789},"supervised learning",{"type":425,"value":791},": training a model using a labeled set of input-output pairs. The descriptive caption is the input, while the image is the output.",{"type":420,"tag":421,"props":793,"children":794},{},[795],{"type":425,"value":796},"This approach allows the model to learn the relationship between words (\"a ginger cat beside a book on a windowsill\") and visuals (the image below).",{"type":420,"tag":421,"props":798,"children":799},{},[800],{"type":420,"tag":489,"props":801,"children":804},{"alt":491,"src":802,"title":803},"image://2e8357de-d3be-4c97-9072-52712b73423a","A ginger cat beside a book on a windowsill.",[],{"type":420,"tag":421,"props":806,"children":807},{},[808],{"type":425,"value":809},"It's worth pointing out that a dataset like this isn't easy to put together. One example is Microsoft's Common Objects in Context (COCO), which features more than 100,000 different images, each with five descriptive captions. 
All of these captions had to be manually written by a team of human annotators.",{"title":414,"searchDepth":25,"depth":25,"links":811},[],{"id":278,"data":279,"type":27,"maxContentLevel":28,"version":27,"parsed":813},{"data":814,"body":816,"toc":844},{"title":414,"description":815},"Text-to-Image models can also be built using something called a Generative Adversarial Network (GAN).",{"type":417,"children":817},[818,829,834,839],{"type":420,"tag":421,"props":819,"children":820},{},[821,823,828],{"type":425,"value":822},"Text-to-Image models can also be built using something called a ",{"type":420,"tag":428,"props":824,"children":825},{},[826],{"type":425,"value":827},"Generative Adversarial Network (GAN)",{"type":425,"value":450},{"type":420,"tag":421,"props":830,"children":831},{},[832],{"type":425,"value":833},"This approach involves a pair of neural networks which are designed to work against each other. The first neural network (the Generator) produces AI generated images. The second network (the Discriminator) looks at these images, along with some real-life images, and tries to guess which ones are which.",{"type":420,"tag":421,"props":835,"children":836},{},[837],{"type":425,"value":838},"It's almost as though the two networks are playing a game. If the Generator manages to fool the Discriminator, it gets positive feedback. If it fails, the Discriminator gets positive feedback. Over time, both networks get better and better at generating and detecting AI generated images.",{"type":420,"tag":421,"props":840,"children":841},{},[842],{"type":425,"value":843},"The GAN approach isn't only used for Text-to-Image models. It can also be used to generate text. 
But it seems to work especially well for images.",{"title":414,"searchDepth":25,"depth":25,"links":845},[],{"id":283,"data":284,"type":27,"maxContentLevel":28,"version":27,"reviews":287,"parsed":847},{"data":848,"body":850,"toc":874},{"title":414,"description":849},"Now, it's important to remember that when a Text-to-Image model learns to generate images, it's not actually 'aware' of what those images look like. At least, not in the same way as a human would be if they were drawing, or taking a photo.",{"type":417,"children":851},[852,856,861,869],{"type":420,"tag":421,"props":853,"children":854},{},[855],{"type":425,"value":849},{"type":420,"tag":421,"props":857,"children":858},{},[859],{"type":425,"value":860},"Instead, every image is converted into some kind of numerical structure which the computer is able to work with. There are different ways to go about this, but one simple example would be a map of pixels, each with a number that describes that pixel's color.",{"type":420,"tag":421,"props":862,"children":863},{},[864],{"type":420,"tag":489,"props":865,"children":868},{"alt":491,"src":866,"title":867},"image://3a15386b-e22b-47bb-960c-5f8f9089ff0d","Simplified map of pixels.",[],{"type":420,"tag":421,"props":870,"children":871},{},[872],{"type":425,"value":873},"In other words, what's happening behind the scenes of a Text-to-Image model is similar to an LLM. 
An image is treated as a complex pattern of numbers, exactly the same as an LLM might treat a piece of text.",{"title":414,"searchDepth":25,"depth":25,"links":875},[],{"id":309,"data":310,"type":27,"maxContentLevel":28,"version":27,"parsed":877},{"data":878,"body":880,"toc":909},{"title":414,"description":879},"The applications of Text-to-Image models are extremely vast and varied.",{"type":417,"children":881},[882,886,891,896,904],{"type":420,"tag":421,"props":883,"children":884},{},[885],{"type":425,"value":879},{"type":420,"tag":421,"props":887,"children":888},{},[889],{"type":425,"value":890},"Just like text generation from an LLM, these images lack the emotional depth and conceptual complexity that you would find in human-made artwork. But if you're willing to overlook that fact...",{"type":420,"tag":421,"props":892,"children":893},{},[894],{"type":425,"value":895},"With the right choice of prompt, you can use them to generate digital paintings, product blueprints, marketing materials, greetings cards, fashion designs, game assets, memes, and more.",{"type":420,"tag":421,"props":897,"children":898},{},[899],{"type":420,"tag":489,"props":900,"children":903},{"alt":491,"src":901,"title":902},"image://3713cf3e-0914-4b75-8be0-f8cc99e39e2d","An octopus loading up Kinnu?",[],{"type":420,"tag":421,"props":905,"children":906},{},[907],{"type":425,"value":908},"Work is even being done to harness these models in medical settings. 
In the future, a surgeon could potentially describe a patient's symptoms to one of these models, and receive a diagram which helps them to plan the surgery.",{"title":414,"searchDepth":25,"depth":25,"links":910},[],{"id":314,"data":315,"type":25,"version":27,"maxContentLevel":28,"summaryPage":317,"introPage":325,"pages":912},[913,953,981,1022],{"id":333,"data":334,"type":27,"maxContentLevel":28,"version":27,"reviews":337,"parsed":914},{"data":915,"body":917,"toc":951},{"title":414,"description":916},"Generative AI isn't only used for text and image generation. There are also lots of other models out there, which can produce a range of content, including speech, videos and music.",{"type":417,"children":918},[919,923,933,938,943],{"type":420,"tag":421,"props":920,"children":921},{},[922],{"type":425,"value":916},{"type":420,"tag":421,"props":924,"children":925},{},[926,931],{"type":420,"tag":428,"props":927,"children":928},{},[929],{"type":425,"value":930},"Text-to-Speech models",{"type":425,"value":932}," are able to turn written text into human-like spoken words. They're used all over the place, from voice assistants, to audiobooks, to tools for visually impaired people.",{"type":420,"tag":421,"props":934,"children":935},{},[936],{"type":425,"value":937},"There are lots of different ways to build these models. But they're generally based on deep learning. If you train a model on thousands of written texts, plus audio files of people reading those texts, it can learn to sound like a real person talking.",{"type":420,"tag":421,"props":939,"children":940},{},[941],{"type":425,"value":942},"As always, these models aren't aware that they're 'talking'. Just like LLMs, and Text-to-Image models, they treat their output (audio) as a piece of numerical data.",{"type":420,"tag":421,"props":944,"children":945},{},[946],{"type":420,"tag":489,"props":947,"children":950},{"alt":491,"src":948,"title":949},"image://86ac7ff7-94c2-4b9c-aede-1fb0f74e0d7a","Audio data. 
Image via Pexels",[],{"title":414,"searchDepth":25,"depth":25,"links":952},[],{"id":346,"data":347,"type":27,"maxContentLevel":28,"version":27,"reviews":350,"parsed":954},{"data":955,"body":957,"toc":979},{"title":414,"description":956},"Text-to-Video models can generate moving visual content after receiving a written prompt. They're essentially just a more advanced and complicated version of a Text-to-Image model, maybe with some Text-to-Speech thrown in if the video is meant to have audio.",{"type":417,"children":958},[959,969,974],{"type":420,"tag":421,"props":960,"children":961},{},[962,967],{"type":420,"tag":428,"props":963,"children":964},{},[965],{"type":425,"value":966},"Text-to-Video models",{"type":425,"value":968}," can generate moving visual content after receiving a written prompt. They're essentially just a more advanced and complicated version of a Text-to-Image model, maybe with some Text-to-Speech thrown in if the video is meant to have audio.",{"type":420,"tag":421,"props":970,"children":971},{},[972],{"type":425,"value":973},"This particular type of generative AI isn't quite as popular as the others. That's mainly because it lacks consistency: even the best models have a tendency to produce slightly strange, uncanny videos.",{"type":420,"tag":421,"props":975,"children":976},{},[977],{"type":425,"value":978},"But that could potentially change in the next few years, as more advanced models, trained on better datasets, are developed. If that does happen, it could have a major impact on fields like entertainment and advertising.",{"title":414,"searchDepth":25,"depth":25,"links":980},[],{"id":363,"data":364,"type":27,"maxContentLevel":28,"version":27,"parsed":982},{"data":983,"body":985,"toc":1020},{"title":414,"description":984},"Text-to-Music models are one more type of modern generative AI. 
They've been trained to generate pieces of music by looking at the patterns and structures of existing compositions.",{"type":417,"children":986},[987,997,1005,1010,1015],{"type":420,"tag":421,"props":988,"children":989},{},[990,995],{"type":420,"tag":428,"props":991,"children":992},{},[993],{"type":425,"value":994},"Text-to-Music models",{"type":425,"value":996}," are one more type of modern generative AI. They've been trained to generate pieces of music by looking at the patterns and structures of existing compositions.",{"type":420,"tag":421,"props":998,"children":999},{},[1000],{"type":420,"tag":489,"props":1001,"children":1004},{"alt":491,"src":1002,"title":1003},"image://07bd2135-c77a-4c32-be85-19063c19fa43","Music. Image via Pexels",[],{"type":420,"tag":421,"props":1006,"children":1007},{},[1008],{"type":425,"value":1009},"For example, you could input a prompt like \"generate a calm, relaxing melody,\" and the model would generate a piece of music accordingly.",{"type":420,"tag":421,"props":1011,"children":1012},{},[1013],{"type":425,"value":1014},"One of the most well-known examples of this is a model called Suno AI. But Suno is pretty controversial. In June 2024, it was actually sued by a group of record labels, including Universal, Sony, and Warner Music.",{"type":420,"tag":421,"props":1016,"children":1017},{},[1018],{"type":425,"value":1019},"Why? Because Suno was supposedly trained on a dataset which featured songs from famous artists like Mariah Carey, Green Day, and Michael Jackson. 
According to the record labels, Suno's use of these songs was breaking copyright laws.",{"title":414,"searchDepth":25,"depth":25,"links":1021},[],{"id":368,"data":369,"type":27,"maxContentLevel":28,"version":27,"reviews":372,"parsed":1023},{"data":1024,"body":1026,"toc":1052},{"title":414,"description":1025},"Suno isn't the only AI with a dataset that's come under scrutiny.",{"type":417,"children":1027},[1028,1032,1037,1042,1047],{"type":420,"tag":421,"props":1029,"children":1030},{},[1031],{"type":425,"value":1025},{"type":420,"tag":421,"props":1033,"children":1034},{},[1035],{"type":425,"value":1036},"A number of artists have also been trying to take Text-to-Image models (like Midjourney) to court, after claiming that these models were trained on their art without getting any prior permission.",{"type":420,"tag":421,"props":1038,"children":1039},{},[1040],{"type":425,"value":1041},"In 2023, a group of authors including George RR Martin, John Grisham, and Jodi Picoult, made similar claims against ChatGPT.",{"type":420,"tag":421,"props":1043,"children":1044},{},[1045],{"type":425,"value":1046},"As things stand, the use of copyrighted content in AI training is still a bit of a gray area. There isn't just the legal question to think about, but the ethical question too. Is it fair to use a creator's work without asking for prior permission?",{"type":420,"tag":421,"props":1048,"children":1049},{},[1050],{"type":425,"value":1051},"For now, let's put a pin in that question. 
We'll be taking a proper look at AI ethics later in the pathway.",{"title":414,"searchDepth":25,"depth":25,"links":1053},[],{"left":4,"top":4,"width":1055,"height":1055,"rotate":4,"vFlip":6,"hFlip":6,"body":1056},24,"\u003Cpath fill=\"none\" stroke=\"currentColor\" stroke-linecap=\"round\" stroke-linejoin=\"round\" stroke-width=\"2\" d=\"m9 18l6-6l-6-6\"/>",{"left":4,"top":4,"width":1055,"height":1055,"rotate":4,"vFlip":6,"hFlip":6,"body":1058},"\u003Cpath fill=\"none\" stroke=\"currentColor\" stroke-linecap=\"round\" stroke-linejoin=\"round\" stroke-width=\"2\" d=\"M4 5h16M4 12h16M4 19h16\"/>",1778179450202]