Voices in AI – Episode 24: A Conversation with Deep Varma


In this episode, Byron and Deep talk about the nervous system, AGI, the Turing Test, Watson, Alexa, security, and privacy.







Byron Reese: This is Voices in AI, brought to you by Gigaom. I’m Byron Reese. Today our guest is Deep Varma. He is the VP of Data Engineering and Science over at Trulia. He holds a Bachelor of Science in Computer Science, a Master’s degree in Management Information Systems, and even an MBA from Berkeley to top all of that off. Welcome to the show, Deep.

Deep Varma: Thank you. Thanks, Byron, for having me here.

I’d like to start with my Rorschach test question, which is, what is artificial intelligence?

Awesome. Yeah, so as I define artificial intelligence, this is an intelligence created by machines based on human wisdom, to augment a human’s lifestyle and help them make smarter choices. So that’s how I define artificial intelligence in very simple, layman’s terms.

But you just kind of used the words “smart” and “intelligent” in the definition. What actually is intelligence?

Yeah, I think the intelligence part, what we need to understand is, when you think about human beings, most of the time, they are making decisions, they are making choices. And AI, artificially, is helping us to make smarter choices and decisions.

A very clear-cut example, which sometimes we don’t see, is this: I still remember in the old days I used to have a conventional thermostat at my home, which I turned on and off manually. Then, suddenly, here comes artificial intelligence, which gave us Nest. As soon as I put the Nest there, it’s an intelligence. It is sensing whether someone is in the home or not, so there’s motion sensing. Then it is seeing what kind of temperature I like during summer time and during winter time. And so, artificially, the software, which is the brain that we have put on this device, is doing this intelligence and saying, “Great, this is what I’m going to do.” So, in one way it augmented my lifestyle—rather than me making those decisions, it is helping me make the smart choices. So, that’s what I meant by this intelligence piece here.

Well, let me take a different tack: in what sense is it artificial? Is that Nest thermostat actually intelligent, or is it just mimicking intelligence? Or are those the same thing?

What we are doing is putting some sensors on those devices—think of it like the central nervous system that human beings have: it is a small piece of software embedded within that device, which is making decisions for you. So it is trying to mimic; it is trying to make some predictions based on some of the data it is collecting. In one way, if you step back, that’s what human beings are doing on a day-to-day basis. There is a piece of it where you can go with a hybrid approach. It is mimicking as well as trying to learn, also.

Do you think we learn a lot about artificial intelligence by studying how humans learn things? Is that the first step when you want to do computer vision or translation, do you start by saying, “Okay, how do I do it?” Or do you start by saying, “Forget how a human does it, what would be the way a machine would do it?”

Yes, I think it is very tough to compare the two entities, because when it comes to the speed at which human brains, or the central nervous system, process data, machines are still not at the same pace. So, I think the difference here is, when I grew up my parents started telling me, “Hey, this is the Taj Mahal. The sky is blue,” and I started taking in this data, I started inferring, and then I started passing this information to others.

It’s the same way with machines, the only difference here is that we are feeding information to machines. We are saying, “Computer vision: here is a photograph of a cat, here is a photograph of a cat, too,” and we keep on feeding this information—the same way we are feeding information to our brains—so the machines get trained. Then, over a period of time, when we show another image of a cat, we don’t need to say, “This is a cat, Machine.” The machine will say, “Oh, I found out that this is a cat.”

So, I think this is the difference between a machine and a human being, where, in the case of machine, we are feeding the information to them, in one form or another, using devices; but in the case of human beings, you have conscious learning, you have the physical aspects around you that affect how you’re learning. So that’s, I think, where we are with artificial intelligence, which is still in the infancy stage.
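To make the training loop Deep describes concrete, here is a minimal, hypothetical sketch in Python using scikit-learn: we hand the machine labeled examples (“cat” / “not cat”) and it learns to label a new example on its own. The feature vectors are invented placeholders for whatever an image pipeline would actually extract.

from sklearn.linear_model import LogisticRegression

# Toy "image features" and the human-provided labels we keep feeding the machine.
X_train = [[0.9, 0.1], [0.8, 0.2], [0.2, 0.9], [0.1, 0.8]]
y_train = ["cat", "cat", "not cat", "not cat"]

model = LogisticRegression()
model.fit(X_train, y_train)            # the "feeding information" step

# Later, the machine labels an unseen example by itself.
print(model.predict([[0.85, 0.15]]))   # -> ['cat']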

Humans are really good at transfer learning, right, like I can show you a picture of a miniature version of the Statue of Liberty, and then I can show you a bunch of photos and you can tell when it’s upside down, or half in water, or obscured by light and all that. We do that really well. 

How close are we to being able to feed computers a bunch of photos of cats, and the computer nails the cat thing, but then we only feed it three or four images of mice, and it takes all that stuff it knows about different cats, and it is able to figure out all about different mice?

So, is your question, do we think these machines are going to be at the same level as human beings at doing this?

No, I guess the question is, if we have to teach, “Here’s a cat, here’s a thimble, here’s ten thousand thimbles, here’s a pin cushion, here’s ten thousand more pin cushions…” If we have to do one thing at a time, we’re never going to get there. What we’ve got to do is learn how to abstract up a level, and say, “Here’s a manatee,” and it should be able to spot a manatee in any situation.

Yeah, and I think this is where we start moving into the general intelligence area. This is where it is becoming a little interesting and challenging, because human beings fall under more of the general intelligence, and machines still fall under the artificial intelligence framework.

And the example you were giving, I have two boys, and when my boys were young, I’d tell them, “Hey, this is milk,” and I’d show them milk two times and they knew, “Awesome, this is milk.” And here come the machines, and you keep feeding them the big data with the hope that they will learn and they will say, “This is basically a picture of a mouse or this is a picture of a cat.”

This is where, I think, artificial general intelligence is shaping up—the idea that we are going to abstract a level up and start conditioning—but I feel we haven’t cracked the code one level down yet. So, I think it’s going to take us time to get to the next level, I believe, at this time.
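The few-shot idea Byron raises above is usually approached with transfer learning: reuse the visual features a network learned from millions of images, and retrain only a small new output layer on a handful of examples. The sketch below is a generic illustration using PyTorch and torchvision, not anything Deep describes Trulia doing, and the random tensors stand in for real labeled mouse photos.

import torch
import torch.nn as nn
from torchvision import models

# Start from a network that already learned general visual features.
backbone = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
for param in backbone.parameters():
    param.requires_grad = False                        # keep what it already knows

# Replace the final layer with a new two-class head: mouse / not mouse.
backbone.fc = nn.Linear(backbone.fc.in_features, 2)

optimizer = torch.optim.Adam(backbone.fc.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

# A handful of labeled examples (random tensors stand in for real photos).
few_shot_images = torch.randn(4, 3, 224, 224)
few_shot_labels = torch.tensor([1, 1, 0, 0])

backbone.train()
for _ in range(20):                                    # a few quick passes
    optimizer.zero_grad()
    loss = loss_fn(backbone(few_shot_images), few_shot_labels)
    loss.backward()
    optimizer.step()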

Believe me, I understand that. It’s funny, when you chat with people who spend their days working on these problems, they’re worried about, “How am I going to solve this problem I have tomorrow?” They’re not as concerned about that. That being said, everybody kind of likes to think about an AGI. 

AI is, what, six decades old, and we’ve been making progress. Do you believe that that is something that is going to evolve into an AGI? Like, we’re on that path already, and we’re just one percent of the way there? Or is an AGI something completely different? It’s not just a better narrow AI, it’s not just a bunch of narrow AIs bolted together, it’s a completely different thing. What do you say?

Yes, so what I will say is, it is like in the software development of computer systems—we call this an object, and then we do inheritance of a couple of objects, and the encapsulation of the objects. When you think about what is happening in artificial intelligence, there are companies, like Trulia, who are investing in building computer vision for real estate. There are companies investing in building computer vision for cars, and all those things. We are in this state where all these dysfunctional, disassociated investments are happening in our systems, and there are pieces that are going to come out of that which will go towards AGI.

Where I tend to disagree: I believe AI is complementing us and AGI is replicating us. And this is where I tend to believe that the day the AGI comes—that means it’s a singularity where they are reaching the wisdom or the processing power of human beings—that, to me, seems like doomsday, right? Because those machines are going to be smarter than us, and they will control us.

And the reason I believe that—and there is a scientific reason for my belief—is because we know that in the central nervous system the core tool is the neuron, and we know neurons carry two signals—chemical and electrical. Machines can carry the electrical signals, but the chemical signals are the ones which generate these sensory signals—you touch something, you feel it. And this is where I tend to believe that AGI is not going to happen; I’m close to confident. Thinking machines are going to come—IBM Watson, as an example—so that’s how I’m differentiating it at this time.

So, to be clear, you said you don’t believe we’ll ever make an AGI?

I will be the one on the extreme end, but I will say yes.

That’s fascinating. Why is that? The normal argument is a reductionist argument. It says, you are some number of trillions of cells that come together, and there’s an emergent “you” that comes out of that. And, hypothetically, if we made a synthetic copy of every one of those cells, and connected them, and did all that, there would be another Deep Varma. So where do you think the flaw in that logic is?

I think the flaw in that logic is that the general intelligence that humans have is also driven by the emotional side, and the emotional side—basically, I call it a chemical soup—is, I feel, the part of the DNA which is not going to be possible to replicate in these machines. These machines will learn by themselves—we recently saw what happened with Facebook, where Facebook machines were talking to each other and they started inventing their own language over a period of time—but I believe the chemical mix of humans is next to impossible to reproduce.

I mean—and I don’t want to take a stand, because we have seen over the decades that what people used to believe in the seventies can be proven right—I think the day we are able to find the chemical soup, it means we have found Nirvana; we have found out how human beings have been born and how they have been built over a period of time, and it took us, we all know, millions and millions of years to come to this stage. So that’s the part which is putting me on the other extreme end, to say, “Is there really going to be another Deep Varma?” And if yes, then where is this emotional aspect, where are those things that are going to fit into the bigger picture which drives human beings onto the next level?

Well, I mean, there’s a hundred questions rushing for the door right now. I’ll start with the first one. What do you think is the limit of what we’ll be able to do without the chemical part? So, for instance, let me ask a straightforward question—will we be able to build a machine that passes the Turing test?

Can we build that machine? I think, potentially, yes, we can.

So, you can carry on a conversation with it, and not be able to figure out that it’s a machine? So, in that case, it’s artificial intelligence in the sense that it really is artificial. It’s just running a program, saying some words, but there’s nobody home.

Yes, we have IBM Watson, which can go a level up as compared to Alexa. I think we will build machines which, behind the scenes, are trying to understand your intent and trying to have those conversations—like Alexa and Siri. And I believe they are going to eventually start becoming more like your virtual assistants, helping you make decisions and complementing you to make your lifestyle better. I think that’s definitely the direction we’re going to keep seeing investments going on.

I read a paper of yours where you made a passing reference to Westworld.

Right.

Putting aside the last several episodes, and what happened in them—I won’t give any spoilers—take just the first episode: do you think that we will be able to build machines that can interact with people like that?

I think, yes, we will.

But they won’t be truly creative and intelligent like we are?

That’s true.

Alright, fascinating. 

So, there seem to be these two very different camps about artificial intelligence. You have Elon Musk who says it’s an existential threat, you have Bill Gates who’s worried about it, you have Stephen Hawking who’s worried about it, and then there’s this other group of people who think that’s distracting.

I saw that Elon Musk spoke at the governors’ convention and said something, and then Pedro Domingos, who wrote The Master Algorithm, retweeted that article, and his whole tweet was, “One word: sigh.” So, there’s this whole other group of people who think that’s just really distracting—really not going to happen—and they’re really put off by that kind of talk.

Why do you think there’s such a gap between those two groups of people?

The gap is that there is one camp who is very curious, and they believe that millions of years of how human beings evolved can immediately be taken by AGI, and the other camp is more concerned with controlling that, asking are those machines going to become smarter than us, are they going to control us, are we going to become their slaves?

And I think those two camps are the extremes. There is a fear of losing control, because humans—if you look into the food chain, human beings are the only ones in the food chain, as of now, who control everything—fear that if those machines get to our level of wisdom, or smarter than us, we are going to lose control. And that’s where I think those two camps are basically coming to the extreme ends and taking their stands.

Let’s switch gears a little bit. Aside from the robot uprising, there’s a lot of fear wrapped up in the kind of AI we already know how to build, and it’s related to automation. Just to set up the question for the listener, there are generally three camps. One camp says we’re going to have all this narrow AI, and it’s going to put a bunch of people out of work, people with fewer skills, and they’re not going to be able to get new work, and we’re going to have, kind of, the Great Depression going on forever. Then there’s a second group that says, no, no, it’s worse than that, computers can do anything a person can do, we’re all going to be replaced. And then there’s a third camp that says, that’s ridiculous, every time something comes along, like steam or electricity, people just take that technology and use it to increase their own productivity, and that’s how progress happens. So, which of those three camps, or a fourth one, perhaps, do you believe?

I fall into, mostly, the last camp, which is that we are going to increase the productivity of human beings; it means we will be able to deliver more, and faster. A few months back, I was in Berkeley and we were having discussions around this same topic, about automation and how jobs are going to go away. The Obama administration even published a paper around this topic. One example which always comes to my mind is, last year I did a remodel of my house. And when I did the remodeling, there were electrical wires and water pipelines going inside my house, and we had to replace them with copper pipelines, and I was thinking, can machines replace those jobs? I keep coming back to the answer that those skilled jobs are going to be tougher and tougher to replace, but there are going to be productivity gains. Machines can help to cut those pipeline pieces much faster and in a much more accurate way. They can measure how much wire you’ll need to replace those things. So, I think those things are going to help us to make the smarter choices. I continue to believe it is going to be mostly the third camp, where machines will keep complementing us, helping to improve our lifestyles and our productivity so that we make the smarter choices.

So, you would say that, in most jobs, there are elements that automation cannot replace, but it can augment—like a plumber, and so forth. What would you say to somebody who’s worried that they’re going to be unemployable in the future? What would you advise them to do?

Yeah, and the example I gave is a physical job, but think about the example of a business consultant, right? Companies hire business consultants to come in, collect all the data, then prepare PowerPoints on what you should do and what you should not do. I think those are the areas where artificial intelligence is going to come in, and if you have tons of data, then you don’t need a hundred consultants. For those people, I say go and start learning about what can be done to scale you to the next level. So, in the example I’ve just given, if the business consultants are doing an audit of a company’s financial books, they should look into the tools that can help, so that an audit that used to take thirty days now takes ten days. Improve how fast and how accurately you can make those predictions and assumptions using machines, so that those businesses can move on. So, I would tell them to start looking into, and partnering in, those areas early on, so that you are not caught by surprise when one day some industry comes and disrupts you, and you say, “Ouch, I never thought about it, and my job is no longer there.”

It sounds like you’re saying, figure out how to use more technology? That’s your best defense against it: you just start using it to increase your own productivity.

Yeah.

Yeah, it’s interesting, because machine translation is getting comparable to a human, and yet generally people are bullish that we’re going to need more translators, because this is going to cause people to want to do more deals, and then they’re going to need to have contracts negotiated, and know about customs in other countries and all of that. So actually, being a translator, you get more business out of this, not less. Do you think things like that are kind of the road map forward?

Yeah, that’s true.

So, what are some challenges with the technology? In Europe, there’s a movement—I think it’s already adopted in some places, but the EU is considering it—this idea that if an AI makes a decision about you, like whether you get the loan, you have the right to know why it made it. In other words, no black boxes. You have to have transparency and say it was made for this reason. Do you think a) that’s possible, and b) do you think it’s a good policy?

Yes, I definitely believe it’s possible, and it’s a good policy, because this is what consumers want to know, right? In our real estate industry, if I’m trying to refinance my home, the appraiser is going to come, he will look into it, he will sit with me, then he will tell me, “Deep, your house is worth $1.5 million.” He will provide me the data that he used to come to that decision—he used the neighborhood information, he used the recent sold data.

And that, at the end of the day, gives confidence back to the consumer, and it also shows that this is not because the appraiser who came to my home didn’t like me for XYZ reason and ended up giving me something wrong; so, I completely agree that we need to be transparent. We need to share why a decision has been made, and at the same time we should allow people to come and understand it better, and make those decisions better. So, I think those guidelines need to be put into place, because humans tend to be much more biased in their decision-making process, and the machines can take the bias out and bring more unbiased decision making.
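As a toy illustration of that transparency idea, a simple linear scoring model makes the “why” easy to report: each input’s contribution to the decision is just its weight times its value, so the reasons can be listed for the consumer. The weights, features, and threshold below are entirely hypothetical, not any lender’s or Trulia’s actual model.

# Hypothetical weights and applicant features for a toy approval decision.
weights = {"income": 0.4, "debt_ratio": -0.7, "years_employed": 0.2}
applicant = {"income": 1.2, "debt_ratio": 0.9, "years_employed": 0.5}

# Each feature's contribution is weight * value, so the decision can be explained.
contributions = {name: weights[name] * applicant[name] for name in weights}
score = sum(contributions.values())
decision = "approve" if score > 0 else "decline"

print(decision)
for name, value in sorted(contributions.items(), key=lambda kv: abs(kv[1]), reverse=True):
    print(f"{name}: {value:+.2f}")   # the "why" behind the decision, largest factor first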

Right, I guess the other side of that coin, though, is that you take a world of information about who defaulted on their loan, and then you take every bit of information about who paid their loan off, and you just pour it all into some gigantic database, and then you mine it and you try to figure out, “How could I have spotted these people who didn’t pay their loan?” And then you come up with some conclusion that may or may not make any sense to a human, right? Isn’t it the case that it’s weighing hundreds of factors with various weights, and how do you tease out, “Oh, it was this”? Life isn’t quite that simple, is it?

No, it is not, and demystifying this whole black box has never been simple. Trust us, we face those challenges in the real estate industry on a day-to-day basis—we have Trulia’s estimates—and it’s not easy. At the end, we just can’t rely totally on those algorithms to make the decisions for us.

I will give one simple example of how this can go wrong. When we were training our computer vision system, what we were doing was saying, “This is a window, this is a window.” Then the day came when we said, “Wow, our computer vision can look at any image and know this is a window.” And one fine day we got an image where there is a mirror, and there is a reflection of a window in the mirror, and our computer said, “Oh, Deep, this is a window.” So, this is where big data and small data come into play, where small data can make all these predictions go wrong completely.

This is where—when you’re talking about all this data we are taking in to see who’s in default and who’s not in default—I think we need to abstract, and we need to at least make sure that with this aggregated data, this computational data, we know what the reference points are, what the references are that we’re checking, and make sure that we have the right checks and balances so that machines are not ultimately making all the calls for us.

You’re a positive guy. You’re like, “We’re not going to build an AGI, it’s not going to take over the world, people are going to be able to use narrow AI to grow their productivity, we’re not going to have unemployment.” So, what are some of the pitfalls, challenges, or potential problems with the technology?

I agree with you that I’m being positive. Realistically, looking into the data—and I’m not saying that I have the best data in front of me—I think what is most important is that we need to look into history, and we need to see how we evolved, and then the Internet came, and what happened.

The challenge for us is going to be that there are businesses and groups who believe that artificial intelligence is something that they don’t have to worry about, but over a period of time artificial intelligence is going to become more and more a part of business, and those who are not able to catch up with this are going to see the unemployment rate increase. They’re going to see company losses increase, because some of the decisions are not being made in the right way.

You’re going to see companies, like Lehman Brothers, who are making all these data decisions for their clients not by using machines but by relying on humans, and these big companies fail because of that. So, I think that’s an area where we are going to see problems, and bankruptcies, and unemployment increases, because they think that artificial intelligence is not for them or their business, that it’s never going to impact them—this is where I think we are going to get into the most trouble.

The second area of trouble is going to be security and privacy, because all this data is now floating around us. We use the Internet. I use my credit card. Every month we hear about a new hack—Target being hacked, Citibank being hacked—all this data physically stored in systems, and it’s getting hacked. And now we’ll have all this data wirelessly transmitting, machines talking to each other’s devices, IoT devices talking to each other—how are we going to make sure that there is not a security threat? How are we going to make sure that no one is storing my data, and trying to make assumptions, and entering into my bank account? Those are the two areas where I feel we are going to see, in coming years, more and more challenges.

So, you said privacy and security are the two areas?

Denial of accepting AI is the one, and security and privacy is the second one—those are the two areas.

So, in the first one, are there any industries that don’t need to worry about it, or are you saying, “No, if you make bubble-gum you had better start using AI”?

I will say every industry. I think every industry needs to worry about it. Some industries may adopt the technologies faster, some may go slower, but I’m pretty confident that the shift is going to happen so fast that those businesses will be blindsided—be it small businesses, mom and pop shops, or big corporations, it’s going to touch everything.

Well with regard to security, if the threat is artificial intelligence, I guess it stands to reason that the remedy is AI as well, is that true?

The remedy is there, yes. We are seeing so many companies coming and saying, “Hey, we can help you see the DNS attacks. When you have hackers trying to attack your site, use our technology to predict that this IP address or this user agent is wrong.” And we see that, as the remedy, we are building artificial intelligence.

But this is where I think the battle between big data and small data is colliding, and companies are still struggling. Like phishing, which is a big problem. There are so many companies who are trying to solve the phishing problem for email, but we have seen the technologies are not able to solve it. So, I think AI is a remedy, but if we stay focused just on the big data, that’s, I think, completely wrong, because my fear is that a small data set can completely destroy the predictions built by a big data set, and this is where those security threats can bring more of an issue to us.

Explain that last bit again, the small data set can destroy…?

So, I gave the example of computer vision, right? There was research we did in Berkeley where we trained machines to look at pictures of cats, and then suddenly we saw the computer start predicting, “Oh, this is this kind of a cat, this is cat one, cat two, this is a cat with white fur.” Then we took just one image where we put the overlay of a dog on the body of a cat, and the machines ended up predicting, “That’s a dog,” not seeing that it’s the body of a cat. So, all the big data that we used to train our computer vision just collapsed with one photo of a dog. And this is where I feel that if we are emphasizing so much on using the big data set, big data set, big data set, are there smaller data sets which we also need to worry about, to make sure that we are bridging the gap enough that our security is not compromised?
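One hedged way to read that example is as an audit step: after training on a big data set, deliberately alter a handful of images (for instance, paste a patch onto them) and count how often the model’s label flips. The sketch below assumes a `model` object with a `predict` method and represents images as plain nested lists of pixels; both are illustrative stand-ins, not a real API.

def paste_patch(image, patch, top, left):
    """Return a copy of `image` (nested lists of pixels) with `patch` pasted at (top, left)."""
    altered = [row[:] for row in image]
    for i, patch_row in enumerate(patch):
        for j, pixel in enumerate(patch_row):
            altered[top + i][left + j] = pixel
    return altered

def audit_small_data(model, labeled_images, patch):
    """Count images the model got right until a single pasted patch flipped its answer."""
    flips = 0
    for image, true_label in labeled_images:
        clean_ok = model.predict(image) == true_label
        altered_label = model.predict(paste_patch(image, patch, 0, 0))
        if clean_ok and altered_label != true_label:
            flips += 1   # a big-data prediction collapsed on one altered photo
    return flips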

Do you think that the system as a whole is brittle? Like, could there be an attack of such magnitude that it impacts the whole digital ecosystem, or are you worried more about, this company gets hacked and then that one gets hacked and they’re nuisances, but at least we can survive them?

No, I’m more worried about the holistic view. We saw recently how those attacks on the UK hospital systems happened. We saw some attacks—which we are not talking about—on our power stations. I’m more concerned about those. Is there going to be a day when we have built massive infrastructures that are reliant on computers—our generation of power, the supply of power, and telecommunications—and suddenly there is a whole outage which brings the world to a standstill, because there is a small hole which we never thought about? That, to me, is the bigger threat than the standalone individual things which are happening now.

That’s a hard problem to solve. A small hole on the internet that we’ve not thought about, one that can bring the whole thing down, would be a tricky thing to find, wouldn’t it?

It is a tricky thing, and I think that’s what I’m trying to say: most of the time we fail because of those smaller things. If I go back, Byron, and bring artificial general intelligence back into the picture, as human beings it’s those small, small decisions we make—like, I make a fast decision when an animal is approaching very close to me, so close that my senses and my emotions are telling me I’m going to die—and this is where I think sometimes we tend to ignore those small data sets.

I was in a big debate around those self-driving cars which are shaping up around us, and people were asking me when we will see those self-driving cars on a San Francisco street. And I said, “I see people doing crazy jaywalking every day.” Accidents are happening with human drivers, no doubt, but the scale can increase so fast if those machines fail. If they have one simple sensor which is not working at that moment in time and not able to get one signal, it can kill human beings much faster as compared to what human beings are killing, so that’s the rationale which I’m trying to put here.

So, one of the questions I was going to ask you is, do you think AI is a mania? It’s everywhere. But it seems like you’re a person who says every industry needs to adopt it, so if anything, you would say that we need more focus on it, not less. Is that true?

That’s true.

There was a man in the ‘60s named Weizenbaum who made a program called ELIZA, which was a simple program where you would say something like, “I’m having a bad day,” and then it would say, “Why are you having a bad day?” And then you would say, “I’m having a bad day because I had a fight with my spouse,” and then it would ask, “Why did you have a fight?” And so, it’s really simple, but Weizenbaum got really concerned because he saw people pouring out their hearts to it, even though they knew it was a program. It really disturbed him that people developed an emotional attachment to ELIZA, and he said that when a computer says, “I understand,” it’s a lie, that there’s no “I,” there’s nothing that understands anything.

Do you worry that if we build machines that can imitate human emotions, maybe care for people or whatever, that we will end up having an emotional attachment to them, or that that is in some way unhealthy?

You know, Byron, it’s a really great question, and I think you also picked out a great example. So, I have Alexa at my home, right, and I have two boys, and when we are in the kitchen—because Alexa is in our kitchen—my older son comes home and says, “Alexa, what’s the temperature look like today?” Alexa says, “The temperature is this,” and then he says, “Okay, shut up,” to Alexa. My wife is standing there saying, “Hey, don’t be rude, just say, ‘Alexa, stop.’” You see that connection? The connection is you’ve already started treating this machine as something deserving respect, right?

I think, yes, there is that emotional connection there, and that’s getting you used to seeing it as part of your life, with an emotional connection. So, I think, yes, you’re right, that’s a danger.

But, more than Alexa and all those devices, I’m more concerned about the social media sites, which can have a much bigger impact on our society than those devices. Because those devices are still physical in shape, and we know that if the Internet is down, then they’re not talking and all those things. I’m more concerned about these virtual things where people are getting more emotionally attached: “Oh, let me go and check what my friends have been doing today, what movie they watched,” and how they’re trying to fill that emotional gap, not by meeting individuals, but just by seeing the photos to make them happy. But, yes, just to answer your question, I’m concerned about that emotional connection with the devices.

You know, it’s interesting, I know somebody who lives on a farm and he has young children, and, of course, he’s raising animals to slaughter, and he says the rule is you just never name them, because if you name them then that’s it, they become a pet. And, of course, Amazon chose to name Alexa, and give it a human voice; and that had to be a deliberate decision. And you just wonder, kind of, what all went into it. Interestingly, Google did not name theirs, it’s just the Google Assistant. 

How do you think that’s going to shake out? Are we just provincial, and the next generation isn’t going to think anything of it? What do you think will happen?

So, is your question what’s going to happen with all those devices and with all those AI’s and all those things?

Yes, yes.

As of now, those devices are all just operating in their own silos. There are too many silos. Like in my home, I have Alexa, I have a Nest, and those plug-ins. I love, you know, where Alexa is talking to Nest: “Hey Nest, turn it off, turn it on.” I think what we are going to see over the next five years is those devices communicating with each other more, and sending signals, like, “Hey, I just saw that Deep left home, and the garage door is open; close the garage door.”

IoT is popping up pretty fast, and I think people are thinking about it, but they’re not so much worried about that connectivity yet. But I feel that where we are heading is more connectivity between those devices, which will help us, again, complement ourselves and make the smart choices, and our reliance on those assistants is going to increase.

Another example here: I get up in the morning and the first thing I do is come to the kitchen and say, “Alexa, put on the music,” and, “Alexa, what’s the weather going to look like?” With the reply, “Oh, Deep, San Francisco is going to be 75,” Deep knows Deep is going to wear a t-shirt today. Then here comes my coffee machine; it has already learned that I want eight ounces of coffee, so it just makes it.

I think all those connections—“Oh, Deep just woke up, it is six in the morning, Deep is going to go to the office because it’s a working day, Deep just came to the kitchen, play this music, tell Deep that the temperature is this, make coffee for Deep”—this is where we are heading in the next few years. All these movies that we used to watch, where people were sitting there watching everything happen in real time—that’s what I think the next five years are going to look like for us.

So, talk to me about Trulia, how do you deploy AI at your company? Both customer facing and internally?

That’s such an awesome question, because I’m so excited and passionate, because this brings me home. So, I think in artificial intelligence, as you said, there are two aspects to it: one is for the consumer and one is internal. And I think for us, AI helps us to better understand what our consumers are looking for in a home, and how we can help move them faster in their search—that’s the consumer-facing tagline. An example is, “Byron is looking at two-bedroom, two-bath houses in a quiet neighborhood, in a good school district,” and basically, using artificial intelligence, we can surface things in much faster ways so that you don’t have to spend five hours surfing. That’s more consumer facing.

Now when it comes to the internal facing, internal facing is what I call “data-driven decision making.” We launch a product, right? How do we see the usage of our product? How do we predict whether this usage is going to scale? Are consumers going to like this? Should we invest more in this product feature? That’s the internal-facing way we are using artificial intelligence.

I don’t know if you have read some of my blogs, but I talk about data-driven companies. There are two aspects of being data driven: one is data-driven decision making, which is more for the analyst, and that’s the internal side, to your point; and the external side is being a consumer-facing, data-driven product company, which focuses on how we understand the unique criteria and unique intent of you as a buyer. And that’s how we use artificial intelligence across the spectrum at Trulia.

When you say, “Let’s try to solve this problem with data,” is it speculative, like do you swing for the fences and miss a lot? Or do you look for easy incremental wins? Or are you doing anything that would look like pure science, like, “Let’s just experiment and see what happens with this”? Is the science so nascent that you, kind of, just have to get in there and start poking around and see what you can do?

I think it’s both. The science helps you understand those patterns much faster and better and in a much more accurate way—that’s how science helps you. And then, basically, there’s trial and error, or what we call an “A/B testing” framework, which helps you to validate whether what the science is telling you is working or not. I’m happy to share an example with you here if you want.

Yeah, absolutely.

So, the example here is, we have invested in our computer vision, which is, we train our machines and our machines basically say, “Hey, this is a photo of a bathroom, this is a photo of a kitchen,” and we have even trained them so they can say, “This is a kitchen with a wide granite counter-top.” Now we have built this massive database. When a consumer comes to the Trulia site, what they do is share their intent; they say, “I want two bedrooms in Noe Valley,” and the first thing that they do when those listings show up is click on the images, because they want to see what that house looks like.

What we saw was that there were times when those images were blurred, and there were times when those images did not match up with the intent of a consumer. So, what we did with our computer vision was invest in something called “the most attractive image,” which basically takes three attributes—it looks into the quality of an image, it looks into the appropriateness of an image, and it looks into the relevancy of an image—and based on these three things we use our convolutional neural network models to rank the images and we say, “Great, this is the best image.” So now when a consumer comes and looks at that listing, we show the most attractive photo first. And that way, the consumer gets more engaged with that listing. And what we have seen—using the science, which is machine learning, deep learning, CNN models, and doing the A/B testing—is that this project increased our inquiries for the listing by double digits, so that’s one of the examples which I just wanted to share with you.
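A much-simplified sketch of that ranking idea follows: combine the three per-photo scores (quality, appropriateness, relevancy) into one attractiveness score and show the top photo first. In Trulia’s description each score comes from a trained neural network; here they are placeholder numbers, and the weighting is hypothetical.

# Placeholder per-photo scores: (quality, appropriateness, relevancy).
listing_photos = {
    "exterior.jpg": (0.9, 0.8, 0.70),
    "blurry.jpg":   (0.2, 0.9, 0.90),
    "kitchen.jpg":  (0.8, 0.9, 0.95),
}

def attractiveness(scores, weights=(0.3, 0.3, 0.4)):   # hypothetical weighting
    return sum(s * w for s, w in zip(scores, weights))

# Show the highest-scoring photo first on the listing page.
ranked = sorted(listing_photos, key=lambda name: attractiveness(listing_photos[name]), reverse=True)
print(ranked[0])   # -> 'kitchen.jpg'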

That’s fantastic. What is your next challenge? If you could wave a magic wand, what would be the thing you would love to be able to do that, maybe, you don’t have the tools or data to do yet?

I think, what we haven’t talked about here, and I will use just a minute to tell you, is that we have built this amazing personalization platform, which is capturing Byron’s unique preferences and search criteria. We have built machine learning systems like computer vision, recommender systems, and a user engagement prediction model, and I think our next challenge will be to keep optimizing on the consumer’s intent, right? Because the biggest thing that we want to understand is, “What exactly is Byron looking for?” So, if Byron visits a particular neighborhood because he’s travelling to Phoenix, Arizona, does that mean he wants to buy a home there? Or if Byron lives here in San Francisco, how do we understand the difference?

So, we need to keep optimizing that personalization platform—I won’t call it a challenge, because we have already built this, but it is the optimization—and make sure that our consumers get what they’re searching for, and keep surfacing the relevant data to them in a timely manner. I think we are not there yet, but we have made major inroads into our big data and machine learning technologies. One specific example: Deep, basically, is looking into Noe Valley or San Francisco, and email and push notifications are the two channels, for us, where we know that Deep is going to consume the content. Now, the day we learn that Deep is not interested in Noe Valley, we stop sending those things to Deep that day, because we don’t want our consumers to be overwhelmed in their journey. So, I think this is where we are going to keep optimizing on our consumers’ intent, and we’ll keep giving them the right content.
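As a toy illustration of that suppression logic (not Trulia’s implementation), a notification pipeline can simply skip any saved search whose predicted interest score has fallen below a threshold; the threshold and scores below are invented.

INTEREST_THRESHOLD = 0.3   # invented cutoff

def searches_worth_notifying(saved_searches, predicted_interest):
    """Keep only the saved searches the user still seems to care about."""
    return [area for area in saved_searches
            if predicted_interest.get(area, 0.0) >= INTEREST_THRESHOLD]

print(searches_worth_notifying(["Noe Valley", "Phoenix"],
                               {"Noe Valley": 0.1, "Phoenix": 0.8}))
# -> ['Phoenix']: interest in Noe Valley has dropped, so no more emails about it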

Alright, well, that is fantastic. You write on these topics, so if people want to keep up with you, Deep, how can they follow you?

So, when you said “people” it’s other businesses and all those things, right? That’s what you mean?

Well, I was just referring to your blog, like I was reading some of your posts.

Yeah, so we have our tech blog, http://www.trulia.com/tech, and it’s not only me; I have an amazing team of engineers—those who are way smarter than me, to be very candid—my data scientist team, and all those things. So, we write our blogs there, so I definitely ask people to follow us on those blogs. When I go and speak at conferences, we publish that on our tech blog, and I publish things on my LinkedIn profile. So, yeah, those are the channels people can follow. We also host data science meetups here at Trulia in San Francisco, on the seventh floor of our building; that’s another way people can come, and join, and learn from us.

Alright, well I want to thank you for a fascinating hour of conversation, Deep.

Thank you, Byron.

Byron explores issues around artificial intelligence and conscious computers in his upcoming book The Fourth Age, to be published in April by Atria, an imprint of Simon & Schuster. Pre-order a copy here.



Voices in AI – Episode 23: A Conversation with Pedro Domingos


In this episode Byron and Pedro Domingos talk about the master algorithm, machine creativity, and the creation of new jobs in the wake of the AI revolution.







Byron Reese: Hello, this is Voices in AI, brought to you by Gigaom. I’m Byron Reese. Today, I’m excited: our guest is none other than Pedro Domingos, professor at the University of Washington and notable for his book The Master Algorithm: How the Quest for the Ultimate Learning Machine Will Remake Our World. Pedro, welcome to the show.

Pedro Domingos: Thanks for having me.

What is artificial intelligence?

Artificial intelligence is getting computers to do things that traditionally require human intelligence, like reasoning, problem solving, common sense knowledge, learning, vision, speech and language understanding, planning, decision-making and so on.

And is it artificial in the sense that artificial turf is artificial, in that it isn’t really intelligence, it just looks like intelligence? Or is it actually truly intelligent, and the “artificial” just demarks that we created it?

That’s a fun analogy. I hadn’t heard that before. No, I don’t think AI is like artificial turf. I think it’s real intelligence. It’s just intelligence of a different kind. We’re used to thinking of human intelligence, or maybe animal intelligence, as the only intelligence on the planet. What happens now is a different kind of intelligence. It’s a little bit like, does a submarine really swim? Or is it faking that it swims? Actually, it doesn’t really swim, but it can still travel underwater using very different ideas. Or, you know, does a plane fly even though it doesn’t flap its wings? Well, it doesn’t flap its wings but it does fly – and AI is a little bit like that. In some ways, actually, artificial intelligence is intelligent in ways that human intelligence isn’t. There are many areas where AI exceeds human intelligence, so I would say that they’re different forms of intelligence, but it is very much a form of intelligence.

And how would you describe the state of the art, right now?

So, in science and technology progress often happens in spurts. There are long periods of slow progress and then there are periods of very sudden, very rapid progress. And we are definitely in one of those periods of very rapid progress in AI, which was a long time in the making. AI is a field that’s fifty years old, and we had what was called the “AI spring” in the 80s, where it looked like it was going to really take off. But then that didn’t really happen at the end of the day, and the problem was that people back then were trying to do AI using what’s called “knowledge engineering.” If I wanted an AI system to do medical diagnosis, I had to interview doctors and program, you know, the doctor’s knowledge of diagnosis and the formal rules into the computers, and that didn’t scale.

The thing that has changed recently is that we have a new way to do AI, which is machine learning. Instead of trying to program the computers to do things, the computers program themselves by learning from data. So now what I do for medical diagnosis is give the computer a database of patient records – what their symptoms and test results were and what the diagnosis was – and from just that, in thirty seconds, the computer can typically learn to do medical diagnosis better than human doctors. So, thanks to machine learning, we are now seeing a phase of very rapid progress. Also because the learning algorithms have gotten better, and, very importantly, because of the beauty of machine learning: since the intelligence comes from the data, as the data grows exponentially the AI systems get more intelligent with essentially no extra work from us. So now AI is becoming very powerful, just on the back of the weight of data that we have.
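
To make the contrast with knowledge engineering concrete, here is a minimal sketch in Python of the workflow described above: fit a classifier to a table of past patient records, then ask it about a new case. The records, feature names, and the choice of scikit-learn’s decision tree are illustrative assumptions, not anything from the conversation.

# Minimal sketch of "learning diagnosis from data" rather than hand-coding rules.
# The records, features, and model choice are illustrative, not from the episode.
from sklearn.tree import DecisionTreeClassifier

# Each row is a past patient: [age, fever (0 or 1), white_cell_count]; the label is the diagnosis.
records = [[25, 1, 11.2], [62, 0, 6.8], [40, 1, 14.5], [33, 0, 7.1]]
diagnoses = ["infection", "healthy", "infection", "healthy"]

model = DecisionTreeClassifier().fit(records, diagnoses)

# A new patient's symptoms and test results; the learned model supplies the diagnosis.
print(model.predict([[55, 1, 13.0]]))  # e.g. ['infection']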

The other element, of course, is computing power. We need enough computing power to turn all that data into intelligent systems, and we do have that now. So the combination of learning algorithms, a lot of data, and a lot of computing power is what is making the current progress happen.

And, how long do you think we can ride that wave? Do you think that machine learning is the path to an AGI, hypothetically? I mean, do we have, ten, twenty, forty more years of running with kind of the machine learning ball? Or, do we need another kind of breakthrough?

I think machine learning is definitely the path to artificial general intelligence – there are only a few people in AI who would disagree with that. You know, your computer can be as intelligent as you want; if it can’t learn, thirty minutes later it will be falling behind humans. So machine learning really is essential to getting to intelligence. In fact, the whole idea of the singularity came from I.J. Good, back in the 1960s, who had this idea of a learning machine that could make a machine that learned better than it did, as a result of which you would have this succession of better and better, more and more intelligent machines until they left humans in the dust. Now, how long will it take? That’s very hard to predict, precisely because progress is not linear. I think the current bloom of progress at some point will probably plateau. I don’t think we’re on the verge of having general AI. We’ve come a thousand miles, but there’s a million miles more to go. We’re going to need many more breakthroughs, and who knows where those breakthroughs will come from.

In the most optimistic view, maybe this will all happen in the next decade or two, because things will just happen one after another, and we’ll have it very soon. In the more pessimistic view, it’s just too hard and it’ll never happen. If you poll the AI experts, the answers range from just a few decades to never. But the truth is nobody really knows for sure.

What is kind of interesting is not that people don’t know, and not that their forecasts are kind of all over the map, but that among knowledgeable people, if you look at the extreme estimates, five years is the most aggressive, and the furthest out are like five hundred years. And what does that suggest to you? You know, if I went to my cleaners and said, “Hey, when is my shirt going to be ready?” and they said, “Sometime between five and five hundred days,” I would be like, “Okay… something is going on here.” Why do you think the opinions are so varied on when we get an AGI?

Well, the cleaners – when they clean your shirt, it’s a very well-known, very repeatable process. They know how long it takes, and it’s going to take the same amount of time this time, right? There are very few unknowns. The problem in AI is that we don’t even know what we don’t know. We have no idea what we’re missing, so some people think we’re not missing that much. Those are the optimists, saying, “Oh, we just need more data.” Right? Back in the 80s they said, “Oh, we just need more knowledge,” and that wasn’t the case. So that’s the optimistic view. The more pessimistic view is that this is a really, really hard problem and we’ve only scratched the surface, so the uncertainty comes from the fact that we don’t even know what we don’t know.

We certainly don’t know how the brain works, right? We have vague ideas of kind of like what different parts of it do, but in terms of how a thought is encoded, we don’t know. Do you think we need to know more about our own intelligence to make an AGI, or is it like, “No, that’s apples and oranges. It doesn’t really matter how the brain works. We’re building an AGI differently.”

Not necessarily. So, there are different schools of thought in AI, and this is part of what I talk about in my book. There is one school of thought in AI – the Connectionists – whose whole agenda is to reverse engineer the brain. They think that the shortest path is: “here’s the competition, go reverse engineer it, figure out how it works, build it on the computer, and then we’ll have intelligence.” So that is definitely a plausible approach. I think it’s actually a very difficult approach, precisely because we understand so little about how the brain works. In some ways it’s trying to solve a problem by way of solving the hardest of problems. And then there are other AI types, namely the Symbolists, whose whole idea is, “No, we don’t need to understand things at that low level. In fact, we’re just going to get lost in the weeds if we try to do that. We have to understand intelligence at a higher level of abstraction, and we’ll get there much sooner that way. So forget how the brain works; that’s really not important.” Again, the analogy with birds and airplanes is a good one. What the Symbolists say is, “If we try to make airplanes by building machines that flap their wings, we’ll never have them. What we need to do is understand the laws of physics and aerodynamics and then build machines based on that.” So there are different schools of thought, and I actually think it’s good that there are different schools of thought – we’ll see who gets there first.

So, you mentioned your book, The Master Algorithm, which is of course required reading in this field. Can you give the listener who may not be as familiar with it, an overview of what is The Master Algorithm? What are we looking for?

Yeah, sure. So the book is essentially an introduction to machine learning for a general audience – not just for technical people, but business people, policy makers, citizens, and people who are curious. It talks about the impact that machine learning is already having in the world. A lot of people think that these things are science fiction, but they are already in their lives; they just don’t know it. It also looks at the future and what we can expect coming down the line. But mainly, it is an introduction to what I was just describing: that there are five main schools of thought in machine learning. There are the people who want to reverse engineer the brain; the ones who want to simulate evolution; the ones who do machine learning by automating the scientific method; the ones who use Bayesian statistics; and the ones who do reasoning by analogy, like people do in everyday life. And then I look at what these different methods can and can’t do.

The name The Master Algorithm comes from the notion that a machine learning algorithm is a master algorithm in the same sense that a master key opens all doors: a learning algorithm can do all sorts of different things while being the same algorithm. This is really what’s extraordinary about machine learning. In traditional computer science, if I want the computer to play chess, I have to write a program explaining how to play chess; and if I want it to drive a car, I have to write a program explaining how to drive a car. With machine learning, the same learning algorithm can learn to play chess or drive a car or do a million other things, just by learning from the appropriate data. And each of these tribes of machine learning has its own master algorithm. The more optimistic members of each tribe believe that you can do everything with that master algorithm. My contention in the book is that each of these algorithms is only solving part of the problem. What we need to do is unify them all into a grand theory of machine learning, in the same way that physics has a standard model and biology has a central dogma. And then that will be the true master algorithm. I suggest some paths towards that algorithm, and I think we’re actually getting pretty close to it.
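
The master-key idea – one generic learner, many tasks, with only the training data changing – can be sketched in a few lines. Both toy datasets and the choice of a nearest-neighbor learner below are invented for illustration; the point is only that the identical learn() call handles two unrelated tasks.

# One generic learner, two unrelated tasks; only the training data changes.
# The toy datasets and the nearest-neighbor learner are stand-ins for illustration.
from sklearn.neighbors import KNeighborsClassifier

def learn(examples, labels):
    # The same learning algorithm, whatever the task happens to be.
    return KNeighborsClassifier(n_neighbors=1).fit(examples, labels)

# Task 1: crude chess-position evaluation (features: material balance, king safety).
chess = learn([[3, 1], [-2, 0], [0, 1], [-5, 0]], ["good", "bad", "good", "bad"])

# Task 2: crude driving decision (features: gap to the car ahead in meters, speed in mph).
drive = learn([[5, 60], [50, 60], [10, 30], [80, 70]], ["brake", "cruise", "brake", "cruise"])

print(chess.predict([[2, 1]]), drive.predict([[7, 55]]))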

One thing I found empowering in the book, and you state it over and over at the beginning, is that the master algorithm is aspirationally accessible to a wide range of people. You basically said, “You, listening to the book – this is still a field where the layman can make some kind of breakthrough.” Can you speak to that for just a minute?

Absolutely. In fact, that’s part of what got me into machine learning: unlike physics or mathematics or biology, which are very mature fields where you really can only contribute once you have at least a PhD, computer science and AI and machine learning are still very young. So you could be a kid in a garage and have a great idea that will be transformative, and I hope that that will happen. I think even after we find this master algorithm that’s the unification of the five current ones, as we were talking about, we will still be missing some really important, really deep ideas. And I think in some ways someone coming from outside the field is more likely to find those than those of us who are professional machine learning researchers and are already thinking along the tracks of these particular schools of thought. So part of my goal in writing the book was to get people who are not machine learning experts thinking about machine learning, and maybe having the next great ideas that will get us closer to AGI.

And, you also point out in the book why you believe that we know that such a thing is possible, and one of your proof points is our intelligence.

Exactly.

Can you speak to that?

Yeah, so this is, of course, one of those very ambitious goals that people should be, at the outset, a little suspicious of, right? Is this like the philosopher’s stone or the perpetual motion machine – is it really possible? And again, some people don’t think it’s possible. There are a number of reasons why I’m pretty sure it is possible, one of which is that we already have existence proofs. One existence proof is our brain, right? As long as you believe in reductionism, which all scientists do, then the way your brain works can be expressed as an algorithm. And if I program that algorithm into a computer, then that algorithm can learn everything that your brain can. Therefore, in that sense at least, one version of the master algorithm already exists. Another one is evolution. Evolution created us and all life on Earth. It is essentially an algorithm, and we roughly understand how that algorithm works, so there is another existing instance of the master algorithm.

Then, besides these more empirical reasons, there are also theoretical reasons that tell us a master algorithm exists. One of which is that for each of the five tribes, for their master algorithm there’s a theorem that says: if you give enough data to this algorithm, it can learn any function. So, at least at that level, we already know that master algorithms exist. Now the question is how complicated it will be, and how hard it will be to get there – how broadly good would that algorithm be in terms of learning from a reasonable amount of data in a reasonable amount of time?

You just said all scientists are reductionist. Is that necessarily the case, like, can you not be a scientist and believe in something like strong emergence, and say, “Actually you can’t necessarily take the human mind down to individual atoms and kind of reconstruct…”

Yeah, yeah, absolutely – this is a very good point. In fact, in the sense that you’re talking about, we cannot be reductionists in AI. What I mean by reductionist is just the idea that we can decompose a complex system into simpler, smaller parts that interact and that make up the system. This is how all of the sciences and engineering work. But, very much, this does not preclude the existence of emergent properties. The system can be more than the sum of its parts if it’s non-linear, and very much the brain is a non-linear system. And that’s what we have to deal with to reach AI. You could even say that machine learning is the science of emergent properties. In fact, one of the names by which it has been known in some quarters is “self-organizing systems.” And in fact, what makes AI hard, the reason we haven’t already solved it, is that the usual divide-and-conquer strategy that scientists and engineers follow – dividing problems into smaller and smaller sub-problems, then solving the sub-problems and putting the solutions together – tends not to work in AI, because the subsystems are very strongly coupled. So this is a harder problem and there are emergent properties, but that does not mean you can’t reduce it to these pieces; it’s just a harder thing to do.

Marvin Minsky, I remember, talked about how, you know, we kind of got tricked a little bit by the fact that it takes very few fundamental laws of the universe to understand most of physics. The same with electricity. The same with magnetism. There are very few simple laws to explain everything that happens. And so the hope had been that intelligence would be like that. Are we giving up on that notion?

Yes, so, again there are different views within AI on this. I think at one end there are people who hope we will discover a few laws of AI and those would solve everything. At the other end of the spectrum there are people like Marvin Minsky who just think that intelligence is a big, big pile of hacks. He even has a book that’s like one of these tricks per page, and who knows how many more there are. I think, and most people in AI believe, that it’s somewhere in between. If AI is just a big pile of hacks, we’re never going to get there. And it can’t really be just a pile of hacks, because if the hacks were so powerful as to create intelligence, then you can’t really call them hacks.

On the other hand, you know, you can’t reduce it to a few laws, like Newton’s laws. So the idea of the master algorithm is that, at the end of the day, we will find one algorithm that does intelligence, but that algorithm is not going to be a hundred lines of code. It’s not going to be millions of lines of code either. If the algorithm is thousands or maybe tens of thousands of lines of code, that would be great. It’ll still be a more complex theory, much more complex than the ones we have in physics, but it’ll be much, much simpler than what people like Marvin Minsky envisioned.

And if we find the master algorithm… is that good for humanity?

Well, I think it’s good or bad depending on what we do with it. Like all technology, machine learning gives us more power. You can think of it as a superpower, right? Telephones let us speak at a distance, airplanes let us fly, and machine learning lets us predict things and lets technology adapt automatically to our needs. All of this is good if we use it for good. If we use it for bad, it will be bad, right? The technology itself doesn’t know how it’s going to be used, and part of my reason for writing this book is that everybody needs to be aware of what machine learning is and what it can do, so that they can control it. Because otherwise, machine learning will just give more control to the few who actually know how to use it.

I think, if you look at the history of technology, over time, in the end, the good tends to prevail over the bad, which is why we live in a better world today than we did 200 years or 2,000 years ago. But we have to make it happen, right? It just doesn’t fall from the tree like that.

And so, in your view, the master algorithm is essentially synonymous with AGI in the sense that it can figure anything out, it’s a general artificial intelligence. Would it be conscious?

Yeah, so, by the way, I wouldn’t say the master algorithm is synonymous with AGI. I think it’s the enabler of AGI. Once we have a master algorithm we’re still going to need to apply it to vision, and language, and reasoning, and all these things. And then, we’ll have AGI. So, one way to think about this is that it’s an 80/20 rule. The master algorithm is the 20% of the work that gets you 80% of the way, but you still need to do the rest, right? So maybe this is a better way to think about it.

Fair enough. So, I’ll just ask the question a little more directly. What do you think consciousness is?

That’s a very good question. The truth is, what makes consciousness simultaneously so fascinating and so hard is that, at the end of the day, if there is one thing that I know, it’s that I’m conscious, right? Descartes said, “I think, therefore I am,” but maybe he should’ve said, “I’m conscious, therefore I am.” The laws of physics – who knows, they might even be wrong. But the fact that I’m conscious right now is absolutely unquestionable. So everybody knows that about themselves. At the same time, because consciousness is a subjective experience, it doesn’t lend itself to the scientific method. What are reproducible experiments when it comes to consciousness? That’s one aspect. The other is that consciousness is a very complex emergent phenomenon, so nobody really knows what it is, or understands it, even at a fairly shallow level. Now, the reason we believe others have consciousness: you believe that I have consciousness because you’re a human being, I’m a human being, and since you have consciousness, I probably have consciousness as well. And this is really the extent of it. For all you know, I could be a robot talking to you right now, passing the Turing test, and not be conscious at all.

Now, what happens with machines? How can we tell whether a machine is conscious or not? This has been grist for the mill of a lot of philosophers over the last few decades. I think the bottom line is that once a computer starts to act like it’s conscious, we will treat it as if it’s conscious; we will grant it consciousness. In fact, we already do that, even with very simple chatbots and whatnot. So, as far as everyday life goes, it actually won’t be long. In some ways it’ll happen sooner that people treat computers as being conscious than that the computers become truly intelligent. Because that’s all we need, right? We project these human properties onto things that act humanly, even in the slightest way.

Now, at the end of the day, if you gaze down into that hardware and those circuits… is there really consciousness there? I don’t know if we will ever be able to answer that question. Right now, I actually don’t see a good way. I think there will come a point at which we understand consciousness well enough – because we understand the brain well enough – that we are fairly confident we can tell whether something is conscious or not. And at that point I think we will apply these criteria to the machines, and the machines, at least the ones that have been designed to be conscious, will pass the tests, so we will believe that machines have consciousness. But, you know, we can never be totally sure.

And, do you believe consciousness is required for a general intellect?

I think there are many kinds of AI and many AI applications that do not require consciousness. So, for example, if I tell a machine learning system to go solve cancer – that’s one of the things we’d like to do, cure cancer, and machine learning is a very big part of the battle to cure cancer – I don’t think that requires consciousness at all. It requires a lot of searching, understanding molecular biology, trying different drugs, maybe designing drugs, etc. So 90% of AI will involve no consciousness at all.

There are some applications of AI, and some types of AI, that will require consciousness, or something indistinguishable from it – for example, house bots. We would like to have a robot that cooks dinner and does the dishes and makes the bed and whatnot. In order to do all those things, the robot has to have all the capabilities of a human; it has to integrate all of these senses – vision, touch, perception, hearing and whatnot – and then make decisions based on them. I think this is either going to be consciousness or indistinguishable from it.

Do you think there will be problems that arise if that happens? Let’s say you build Rosie the Robot, and you don’t know, like you said, deep down inside, if the robot is conscious or merely acting as if it is. Do you think at that point we have to have this question of, “Are we fine enslaving what could be a conscious machine to plunge our toilet for us?”

Well, that depends on what you consider enslaving, right? One way to look at this, and it’s the way I look at it, is that these are still just machines. Just because they have consciousness doesn’t mean that they have human rights. Human rights are for humans. I don’t think there’s such a thing as robot rights. The deeper question here is: what gives something rights? One school of thought is that it’s the ability to suffer that gives you rights, and therefore animals should have rights. But if you think about it historically, the idea of animal rights even 50 years ago would’ve seemed absurd. So, by the same standard, maybe 50 years from now people will want to have robot rights. In fact, there are some people already talking about it. I think it’s a very strange idea. And often people talk about: will the machines be our friends or will they be our slaves? Will they be our equals? Will they be inferior? Actually, I think this whole way of framing things is mistaken. The robots will be neither our equals nor our slaves. They will be our extensions, right?

Robots are technology, they augment us. I think it’s not so much that the machines will be conscious, but that through machines we will have a bigger consciousness. In the same way that, for example, the internet already gives us a bigger consciousness than we had when there was no internet.

So, robots lead us to a topic that’s in the news literally every day: the prospect that automation and technological advances will eliminate jobs faster than they can create new ones, or will eliminate jobs and replace them with kinds of jobs the displaced workers can’t access. What do you think about that? What do you think the future holds?

I think we have to distinguish between the near term, by which I mean the next ten years or so, and the long term. In the near term, I think some jobs will disappear, just like jobs have disappeared to automation in the past. AI is really automation on steroids. So I think what’s going to happen in the near term is not so different from what has happened in the past. Some jobs will be automated, so some jobs will disappear. But many new jobs will appear as well. It’s always easier to see the jobs that disappear than the ones that appear. Think, for example, of being an app developer. There are millions of people who make a living today as app developers. Ten years ago that job didn’t exist. Fifty years ago you couldn’t even imagine that job. Two hundred years ago ninety-something percent of Americans were farmers, and then farming got automated. Today only 2% of Americans work in agriculture. That doesn’t mean that the other 98% are unemployed; they’re just doing all these jobs that people couldn’t even imagine before. I think a lot of that is what’s going to happen here. We will see entirely new job categories appear. We will also see, on a more mundane level, more demand for lots of existing jobs. For example, I think truck drivers should be worried about the future of their jobs, because self-driving vehicles are coming, so at some point that job will end. There are many millions of truck drivers in the US alone; it’s one of the most widespread occupations. But then, what will they do? People say, “Oh, you can’t turn truck drivers into programmers.” Well, you don’t have to turn them into programmers. Think about what’s going to happen: because trucks will be self-driving, goods will cost less. When goods cost less, people will have more money in their pockets, and they will spend it on other things – like, for example, having bigger, better houses. And therefore there will be more demand for construction workers, and some of these truck drivers will become construction workers, and so on.

You know, having said all that, I think that in the near term the most important thing that’s going to happen to jobs is actually neither the ones that will disappear nor the ones that will appear: most jobs will be transformed by AI. The way I do my job will change because some parts will become automated, but then I will be able to do more things, and do them better, than I could before, when I didn’t have the automation. So really the question everybody needs to think about is: what parts of my job can I automate? Really, the best way to protect your job from automation is to automate it yourself – and then ask, what can I do using these machine learning tools?

Automation is like having a horse. You don’t try to outrun a horse; you ride the horse. And we have to ride automation, to do our jobs better and in more ways than we can now.

So, it doesn’t sound like you’re all that pessimistic about the future of employment?

I’m optimistic, but I also worry. I think that’s a good combination. I think if we’re pessimistic we’ll never do anything. Again, if you look at the history of technology, the optimists at the end of the day are the ones who made the world a better place, not the pessimists. But at the same time, we need to… naïve optimism is very dangerous, right? We need to worry continuously about all the things that could go wrong and make sure that they don’t go wrong. So I think that a combination of optimism and worry is the right one to have.

Some people say we’ll find a way to merge, mentally, with the AI. Is that even a valid question? What do you think of it?

I think that’s what’s going to happen. In fact, it’s already happening. We are going to merge with our machines step by step. You know, a computer is a machine that is closer to us than a television; a smartphone is closer to us than a desktop is, and the laptop is somewhere in between. And we’re already starting to see things such as Google Glass and augmented reality, where in essence the computer is extending our senses and extending our ability to do things. And Elon Musk has this company that is going to create an interface between neurons and computers – and in fact, in research labs this already exists. I have colleagues who work on that; they’re called brain-computer interfaces. So, step by step, right? The way to think about this is: we are cyborgs. Human beings are actually the cyborg species. From day one, we were one with our technology. Even our physiology would be different if we couldn’t do things like light fires and throw spears. So this has always been an ongoing process. Part of us is technology, and that will become more and more so in the future. Also, with things like the Internet, we are connecting ourselves into something bigger. Humanity itself is an emergent phenomenon, and having the Internet and computers allows a greater level to emerge. Exactly how this happens and when is, of course, up for grabs, but that’s the way things are going.

So, you mentioned the singularity in passing a minute ago. Do you believe that that is what will happen, as is commonly thought? That there is going to be some point in the reasonably near future beyond which we cannot see anything, because we don’t have any frame of reference?

I don’t believe a singularity will happen in those terms. So this idea of exponentially increasing progress that goes on forever… that’s not going to happen because it’s physically impossible, right? No exponential goes on forever. It always flattens out sooner or later. All exponentials are really what are called “S curves” in disguise. They go up faster and faster, and this is how all previous technology waves have looked, but then they flatten out, and finally they plateau. Also, this notion that at some point things will become completely incomprehensible for us… I don’t believe that either, because there will always be parts that we understand, number one, and there are limits to what any intelligence can do – human or non-human.
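
The S-curve point is easy to see numerically. In the sketch below (arbitrary, made-up parameters), a logistic curve tracks an exponential closely at first and then flattens out as it approaches its ceiling.

# Exponential growth versus a logistic "S curve": nearly identical early on,
# but the logistic flattens out at its ceiling. Parameters are arbitrary.
import math

ceiling, rate = 100.0, 0.5

def exponential(t):
    return math.exp(rate * t)

def logistic(t):
    return ceiling / (1 + (ceiling - 1) * math.exp(-rate * t))

for t in range(0, 21, 4):
    print(f"t={t:2d}  exponential={exponential(t):10.1f}  s_curve={logistic(t):6.1f}")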

By that standard, the singularity has already happened. A hundred years ago the most advanced technology was maybe something like a car, and I could understand completely how every part of a car works. Today we already have technology – like the computer systems we have now – that nobody understands as a whole. Different people understand different parts, and with machine learning in particular, the notable thing about machine learning algorithms is that they can do very complex things very well, and we have no idea how they’re doing them. And yet we are comfortable with that, because we don’t necessarily care about the details of how it is accomplished; we just care whether the medical diagnosis was correct, or the patient’s cancer was cured, or the car is driving correctly. So I think this notion of the singularity is a little bit off.

Having said that, we are in the middle of one of these S curves. We are seeing very rapid progress, and by the time this has run its course, the world will be a very, very different place from what it is today.

How so?

All these things that we’ve been talking about. We will have intelligent machines surrounding us – not just humanoid machines but intelligence on tap, right? In the same way that today you can use electricity for whatever you want just by plugging into a socket, you will be able to plug into intelligence, and indeed the leading tech companies are already trying to make this happen. So there will be all these things that the greater intelligence enables. Everybody will have a home robot in the same way that they have a car. We will have this whole process that the Internet is enabling, and that the intelligence on top of the Internet is enabling, and the Internet of Things and so on. There will be something like this larger emergent being, if you will, that’s not just individual human beings or just societies. But again, it’s hard to picture exactly what that would be – but this is going to happen.

You know, it always makes the news when an artificial intelligence masters some game – we all know the list. You had chess, and then you had Jeopardy, of course, and then you had AlphaGo, and then recently you had poker. And I get that games are kind of a natural place to start, because it’s a confined universe with very rigid, specific rules, and there’s a lot of training data for teaching a system how to function in it. Are there types of problems that machine learning isn’t suited to solve? I mean, just kind of philosophically – it doesn’t matter how good your algorithms are, or how much data you have, or how fast a computer is; it’s just not the way to solve that particular problem.

Well, certainly some problems are much harder than others, and, as you say, games are easier in the sense that they are these very constrained, artificial universes. And that’s why we can do so well in them. In fact, the summary of what machine learning and AI are good for today is that they are good for these tasks that are somewhat well-defined and constrained.

What people are much better at are things that require knowledge of the world: they require common sense, they require integrating lots of different information. We’re not there yet. We don’t have the learning algorithms that can do that, so the learning algorithms we have today are certainly good for some things but not others. But again, if we have the master algorithm then we will be able to do all these things, and we are making progress towards them. So, we’ll see.

Any time I see a chatbot or something that’s trying to pass the Turing test, I always type the same first question, which is, “Which is bigger, a nickel or the sun?” And not a single one of them has ever answered it correctly.

Well, exactly. Because they don’t have common sense knowledge. It’s amazing what computers can do in some ways, and it’s amazing what they can’t do in others – like these really simple pieces of common sense logic. In a way, one of the big lessons that we’ve learned in AI is that automating the job of a doctor or a lawyer is actually easy. What is very hard to do with AI is what a three-year-old can do, right? If we could have a robot baby that can do what a one-year-old can do, and learn the same way, we would have solved AI. It’s much, much harder to do those things – things that we take for granted, like picking up an object, or walking around without tripping. We take this for granted because evolution spent five hundred million years developing it. It’s extremely sophisticated, but for us it’s below the conscious level. The things that we are conscious of and that we have to go to college for – well, we’re not very good at them; we just learned to do them recently. Those, the computers can do much better.

So, in some ways in AI, it’s the hard things that are easy and the easy things that are hard.

Does it mean anything if something finally passes the Turing test? And if so, when do you think that might happen? When will it say, “Well, the sun is clearly bigger than a nickel.”

Well, with all due respect to Alan Turing, who was a great genius and an AI pioneer, most people in AI, including me, believe that the Turing test is actually a bad idea. The reason the Turing test is a bad idea is that it confuses being intelligent with being human. This idea that you can prove you’re intelligent by fooling a human into thinking you’re a human is very weird, if you think about it. It’s like saying an airplane doesn’t fly until it can fool birds into thinking it’s a bird. That doesn’t make any sense. True intelligence can take many forms, not necessarily the human form. So in some ways we don’t need to pass the Turing test to have AI. And in other ways the Turing test is too easy to pass, and by some standards it has already been passed by systems that no one would call intelligent. Talking with someone for five minutes and fooling them into thinking you’re a human is actually not that hard, because humans are remarkably adept at projecting humanity onto anything that acts human. In fact, even in the 60s there was this famous program called ELIZA, which basically just picked up keywords in what you said and gave back canned responses. And if you talked to ELIZA for five minutes, you’d actually think it was a human.
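
To get a feel for how little machinery that kind of keyword matching needs, here is a toy sketch in the spirit of ELIZA – not Weizenbaum’s actual program, and the keywords and canned replies are invented for illustration.

# Toy ELIZA-style responder: spot a keyword, return a canned reply.
# An illustrative sketch only, not Weizenbaum's original program.
CANNED = {
    "mother": "Tell me more about your family.",
    "dream": "What does that dream suggest to you?",
    "sad": "Why do you think you feel sad?",
}

def reply(utterance):
    lowered = utterance.lower()
    for keyword, response in CANNED.items():
        if keyword in lowered:
            return response
    return "Please go on."  # default when nothing matches

print(reply("I had a strange dream about my mother"))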

Although Weizenbaum’s observation was, even when people knew ELIZA was just a program, they still formed emotional attachments to it, and that’s what he found so disturbing.

Exactly. Human beings have this uncanny ability to treat things as human, because that’s the only reference point that we have, right? This whole idea of reasoning by analogy: if we have something that behaves even a little bit like a human – because there’s nothing else in the universe to compare it to – we start treating it more like a human and projecting more human qualities onto it. And, by the way, this is something that will happen once companies start making bots. It’s already happening with chatbots, like Siri and Cortana and whatnot, and it’ll happen even more so with home robots. There’s going to be a race to make the robots more and more humanlike, because if you form an emotional attachment to my product, that’s what I want, right? I’ll sell more of it, and at a higher price, and so on and so forth. So we’re going to see uncannily humanlike robots and AIs – whether this is a good or bad thing is another matter.

What do you think creativity is, and wouldn’t AGI, by definition, be creative, right? It could write a sonnet, or…

Yeah, so… AGI, by definition, would be creative. One thing that you hear a lot these days, and that unfortunately is incorrect, is, “Oh, we can automate these menial, routine jobs, but creativity is this deeply human thing that will never be automated.” This is a superficially plausible notion, when in fact there are already examples of computers that can compose music. There’s this guy, David Cope, a professor at UC Santa Cruz, who has a computer program that will create music in the style of the composer of your choice. And he does this test where he plays a piece by Mozart, a piece by a human composer imitating Mozart, and a piece by his system; he did this at a conference I was at and asked people to vote for which one was the real Amadeus. The real one won, but second place was actually the computer. So a computer can already write Mozart better than a professional, highly educated human composer can. Computers have made paintings that are actually quite beautiful and striking, many of them. Computers these days write news stories. There’s this company called Narrative Science that will write news stories for you, and the likes of Forbes or Fortune – I forget which one it is – actually publish some of the things that it writes. So it’s not a novel yet, but we will get there. And also in other areas – chess and AlphaGo are notable examples. Both Kasparov and Lee Sedol, when they were beaten by the computer, had this remarkable reaction, saying, “Wow, the computer was so creative. It came up with these moves that I would never have thought of, that seemed dumb at first but turned out to be absolutely brilliant.” And computers have done things in mathematics – theorems and proofs, etc. – all of which, if done by humans, would be considered highly creative. So automating creativity is actually not that hard.

It’s funny, when Kasparov first said it seemed creative, what he was implying was that IBM cheated, that people had intervened. And IBM hadn’t. But, that’s a testament to just how…

There were actually two phases, right? He said that at first, so he was suspicious, because, again, how could something not human actually be doing that? But then later, after the match when he had lost – if you remember, there was this move that Deep Blue made that seemed like a crazy move, because of course, you know, five moves later… And Kasparov had this expression; he said, “I could smell a new kind of intelligence playing against me.” Which is very interesting for us AI types, because we know exactly what was going on, right? It was these search algorithms and a whole bunch of technology that we understand fairly well. It’s interesting that from the outside this just seemed like a new kind of intelligence – and maybe it is.

He also said, “At least it didn’t enjoy beating me.” Which I guess someday, though, it may, right?

Oh, yeah, yeah! And you know, that could happen, depending on how we build them, right? The other very interesting thing that happened in that match – and again, I think it’s symptomatic – is that Kasparov is someone who always won by basically intimidating his opponents into submission. They just got scared of him, and then he beat them. But Deep Blue couldn’t be intimidated by him; it was just a machine, right? As a result of which Kasparov himself, probably for the first time in his life, suddenly became insecure. And then what happened after he lost that game is that in the following games he made mistakes that he would never normally make, because he had suddenly, himself, become insecure.

Foreboding, isn’t it? We talked about emergence a couple of times. There’s the Gaia hypothesis – that maybe all of the life on our planet has an emergent property, some kind of an intelligence that we can’t perceive any more than our cells can perceive us. Do you have any thoughts on that? And do you have any thoughts on whether the Internet could eventually become emergent – an emergent consciousness?

Right. So, I don’t believe in the Gaia hypothesis in the sense that the Earth as it is does not have enough self-regulating ability to achieve the homeostasis that living beings do. In fact, in some cases you get feedback cycles where things actually go very wrong. So most scientists don’t believe in the Gaia hypothesis for Earth today. Now, what I think – and a lot of other people think – is that maybe the Gaia hypothesis will be true in the future, because the Internet keeps expanding, and the Internet of Things puts sensors all over the place, literally all over the planet, and more and more actions are taken based on those sensors – to, among other things, preserve us and presumably other kinds of life on Earth. I think if we fast-forward a hundred years, there’s a very good chance that Earth will look like Gaia, but it will be a Gaia that is technological as opposed to just biological. And in fact, I don’t think there’s an opposition between technology and biology. I think technology will just be the extension of biology by other means – it’s biology that’s made by us. I mean, we’re creatures, and the things that we make are also biology in that sense. So if you look at it that way, maybe what has happened is that since the very beginning Earth has been evolving towards Gaia; we just haven’t gotten there yet. But technology is very much part of getting there.

What do you think of the OpenAI initiative?

The OpenAI initiative – its goal is to do AI for the common good. People like Elon Musk and Sam Altman were afraid that, because the biggest quantity of AI research is being done inside companies like Google and Facebook and Microsoft and Amazon and whatnot, it would be owned by them. And AI is very powerful, so it’s dangerous if AI is owned only by those companies. So their goal is to do AI research that is going to be open – hence the name – and available to everybody. I think this is a great agenda, so I very much agree with trying to do that. There’s nothing wrong with having a lot of AI research in companies, but I think it’s important that there also be AI research that is in the public domain. Universities are one way of doing that; something like OpenAI is another; something like the Allen Institute for AI is another example of doing AI for the public good in this way.

So, I think this is a good agenda. What they’re going to do exactly and what their chances of succeeding are, and how their style of AI will compare to the styles of AI that are being produced by these other labs, whether industry or academia, is something that remains to be seen. But I’m curious to see what they get out of it.

The worry from some people – who make it analogous to a nuclear weapon – is that if you say, “We don’t know how to build one, but we can get 99% of the way there and we’re going to share that with everybody on the planet,” then you have to hope the last little bit that makes it an AGI isn’t supplied by a bad actor of some kind. Does that make sense to you?

Yeah, yeah… I understand the analogy, but you have to remember that AI and nuclear weapons are very different, for a couple of reasons. One is that nuclear weapons are essentially destructive things, right? Yes, you can turn them into nuclear power, but they were invented to blow things up. Whereas AI is a tool that we use to do all sorts of things, like diagnose diseases and place ads on webpages – things from big to small. And the knowledge to build a nuclear bomb is actually not that hard to come by. Fortunately, what is very hard to come by is the enriched uranium, or plutonium, to build the bomb. That’s what actually keeps any terrorist group from building a bomb: it’s not the lack of knowledge, it’s the lack of the materials. Now, in AI it’s very different. You just need computing power, and you can just plug into the cloud and get that computing power. AI is just algorithms – it’s already accessible. Lots of people can use it for whatever they want. In a way, the safety lies in having AI in the hands of everybody, so that it’s not in the hands of a few. If only one person or one company had access to the master algorithm, they would be too powerful. If everybody has access to the master algorithm, then there will be competition, there will be collaboration, there will be a whole ecosystem of things that happen, and we will be safer that way – just as we are with the economy as it is. But, having said that, we will need something like an AI police. William Gibson in Neuromancer had this thing called the Turing police, right? The Turing police are AIs whose job is to police the other AIs, to make sure that they don’t go bad, or that they get stopped when they go bad. And again, this is no different from what already happens. We have highways, and bank robbers can use the highways to get away; that’s no reason not to have highways, but of course the police also need to have cars so they can catch the robbers. I think it’s going to be a similar thing with AI.

When I do these chats with people in AI, science fiction writers always come up. They always reference them; they always have their favorites and whatnot. Do you have any books, movies, TV shows, or anything like that that you watch and think, “Yes, that could happen. I see that”?

Unfortunately, a lot of the depictions of AI and robots in movies and TV shows are not very realistic, because the computers and robots are really just humans in disguise. That’s how you make an interesting story: by making the robots act like humans. They have an evil plan to take over the world, or somebody falls in love with them, and things like that. And that’s how you make an interesting movie.

But real AIs, as we were talking about, are very different from that. So a lot of the movies that people associate with AI – Terminator, for example – are really not what will happen. With the proviso that science fiction is a great source of self-fulfilling prophecies, right? People read those things and then they try to make them happen. So, who knows.

Having said that, what is an example of a movie depicting AI that I think could happen, and is fairly interesting and realistic? Well, one example is the movie Her. The movie Her is basically about a virtual assistant that is very human-like, and ten years ago that would’ve been a very strange movie. These days we already have things like Siri and Cortana and Google Now, that are, of course, still a far cry from Her. But I think we’re going to get closer and closer to that.

And final question: what are you working on, and are you going to write another book? What keeps you busy?

Two things: I think we are pretty close to unifying those five master algorithms, and I’m still working on that. That’s what I’ve been working on for the last ten years. And I think we’re almost there. I think once we’re there, the next thing is that, as we’ve been talking about, that’s not going to be enough. So we need something else. I think we need something beyond the existing five paradigms we have, and I’m working on a new type of learning, that I hope will actually take us beyond what those five could do. Some people have jokingly called it the sixth paradigm, and maybe my next book will be called, “The Sixth Paradigm.” That makes it sound like a Dan Brown novel, but that’s definitely something that I’m working on.

When you say you think the master algorithm is almost ready… will there be a “ta-da” moment, like, here it is? Or is it more gradual?

It’s a gradual thing, right? Again, look at physics: they’ve unified three of the forces – electromagnetism and the strong and weak forces – but they still haven’t unified gravity with them. There are proposals like string theory to do that. These “aha” moments often only happen in retrospect. People propose a theory, and then maybe it gets tested, and then maybe it gets revised, and then finally, when all the pieces are in place, people go, “Oh wow.” And I think it’s going to be like that with the master algorithm as well. We have candidates, we have ways of putting these pieces together; it still remains to be seen whether they can do all the things that we want, and how well they will scale. Scaling is very important, because if it’s not scalable then it’s not really solving the problem. So, we’ll see.

All right, well thank you so much for being on the show.

Thanks for having me, this was great!

Byron explores issues around artificial intelligence and conscious computers in his upcoming book The Fourth Age, to be published in April by Atria, an imprint of Simon & Schuster. Pre-order a copy here.

Gigaom Analyst Bob Egan and Futurist Rita J. King Lead Gigaom #MobilityTalk Chat on Twitter December 7th

Get the latest strategies in enterprise mobility, security, BYOD, and deployed devices in our 45-minute Twitter chat, #MobilityTalk, on 12/7 at 1:00 PM ET / 10:00 AM PT, together with our partner Samsung Electronics America. Co-hosts Gigaom senior executive analyst Bob Egan and futurist Rita J. King, plus experts from Samsung’s security team, will talk about the top enterprise mobility trends you need to know.

With questions presented by Gigaom and sponsored by Samsung, the chat will follow along as these industry advisors tackle the answers that enterprise executives can’t do without. There will be some time at the end to take questions from the audience.

Check out the questions to be discussed below, and plan to tune in to Gigaom on Twitter, December 7th, 1:00 PM ET / 10:00 AM PT.

  • How can businesses make sure innovation doesn’t lower productivity?
  • Will deployed devices eventually replace BYOD in the enterprise?
  • How can CIOs convince their organizations that deployed devices are worthwhile?
  • When it comes to deploying devices, what are the biggest struggles in the enterprise?
  • What is the role of security in your business today?
  • How is security affecting your company’s growth?
  • What are the 3 things your business should consider when deploying devices?

Voices in AI – Episode 22: A Conversation with Rudina Seseri

In this episode, Byron and Rudina talk about the AI talent pool, cyber security, the future of learning, and privacy.

Byron Reese: This is Voices in AI, brought to you by Gigaom. I’m Byron Reese. Today, our guest is Rudina Seseri. She is the founding and managing partner over at Glasswing Ventures. She’s also an entrepreneur in residence at Harvard Business School and she holds an MBA from that same institution. Welcome to the show, Rudina.

Rudina Seseri: Hello Byron. Thank you for having me.

You wrote a really good piece for Gigaom, as a matter of fact; it was your advice to startups—don’t say you’re doing AI just to have the buzzwords on the side, you better be able to say what you’re really doing.  

What is your operational definition of artificial intelligence, and can you expand on that theme? Because I think it’s really good advice.

Sure, happy to. AI—and I think of it as the wave of disruption—has become such a popular term, and I think there are definitional challenges in the market. From my perspective, and at the very highest level, AI is technology, largely computers and software, that possesses or has some level of intelligence that mirrors that of humans. It’s as basic as one would imagine it to be by the very name artificial intelligence.

Where I think we are in the AI maturity curve, if one wants to express it in such a form, is really the early days of AI and the impact it is having and will have going forward. It’s really, what I would call, “narrow AI” in that we’re not at a point where machines, in general, can operate at the same level of diversity and complexity as the human mind. But for narrow purposes, or in a narrow function—for a number of areas across enterprise and consumer businesses—AI can be really transformational, even narrow AI.

Expressed differently, we think of AI as anything—such as visual recognition, social cognition, speech recognition—underpinned with a level of machine learning, with a particular interest around deep learning. I hope that helps.

That’s wonderful. You’re an investor so you get pitches all the time and you’re bound to see ones where the term AI is used, and it’s really just in there to play “buzzword bingo” and all of that… Because, your definition that it’s, “doing things humans would normally do” kind of takes me back to my cat food bowl that fills itself up when it’s empty. It’s weighing and measuring it so that I don’t have to. I used to do it, and now a computer does it. Surely, if you saw that in a business case, like, “We have an AI cat food bowl,” that really isn’t AI, or is it? And then you’ve got things like the Nest, which is a learning system. It learns as you do it, and yours is eventually going to be different than mine—I think that is clearly in the AI camp. What would be a case of something that you would see in a business case and just roll your eyes?

To address your examples and give you a few illustrations, I think in your example of the cat food plate or whatnot, I think you’re describing automation much more than AI. And you can automate it because it’s very prescriptive—if A takes place, then do B; if C takes place, then do D. I think that’s very different than AI.

I think when technologies and products are leveraging artificial intelligence, you are really looking for a learning capability. Although, to be perfectly honest, even within the world of artificial intelligence, researchers don’t agree on whether learning, in and of itself, qualifies as AI. But, coming back to everyday applications, I think, much like the human mind learns, in artificial intelligence, whatever facet of it, we are looking for some level of learning. For sure, that’s a differentiator.

To then address your question head on, my goodness, we’re seeing AI disrupt all facets—from cyber security and martech to IT and HR to new robotics platforms—it’s running the whole gamut. Why don’t I give you a perfect example, that’s a real example, and I can give you the name of a portfolio company so we make it even more practical and less hypothetical?

One of my recent investments is a company called Talla. Talla is taking advantage of natural language processing capabilities for the HR and IT organizations in particular, where they’re automating lower level tickets, Q&A for issues that an employee may have—maybe an outage of email or some other question around an HR benefit—and instead of having a human address the question, it is actually the bot that’s addressing the question. The bot is initially augmenting, so if the question is too complex and the bot can only take the answer so far and can’t fully address the particular question, then the human becomes involved. But the bot is learning, so when a second person has a similar question, the bot can actually address it fully.

In that instance, you have both natural language processing and a lot of learning, because no two humans ask the very same question. And even if we are asking the same question, we do not ask it in the same manner. That’s the beauty of our species. So, there’s a lot of learning that goes on in that regard. And, of course, it’s also the case that it’s driving productivity and augmentation. Does that address your question, Byron?

Absolutely. That’s Rob May’s company, isn’t it?

Yes, it is.

I know Rob; he’s a brilliant guy.

Phenomenal.

Specifically, with that concept, as we are able to automate more things at a human level, like customer service inquiries, how important do you think it is that the end user knows that they’re talking to a bot of some kind, as opposed to a person?

When you say “know,” are you trying to get at the societal norm of what… Is this a normative question?

Exactly. If I ask where is your FAQ and “Julia”—in air quotes—says, “Here. Our FAQs are located here,” and there was no human involved, how important is it that I, as an end user, know that it’s called “Julia Bot” not “Julia”?

I think disclosure is always best. There’s nothing to be hidden, there’s nothing that’s occurring that’s untoward. In that regard, I would personally advocate for erring on the side of disclosure rather than not, especially if there is learning involved, which means observing, on the part of the bot. I think it would be important. I also think that we’re in the early days of this type of technology being adopted and becoming pervasive that the best practices and norms have yet to be established.

Where I suspect you will see both, is what I call the “New York Times risk”—where we’ll have a lot more discussion around what’s an acceptable norm and what’s right and wrong in this emerging paradigm—when we read a story where something went the wrong way. Then we will all weigh in, and the bodies will come together and establish norms. But, I think, fundamentally, erring on the side of disclosure serves a company well at all times.

You’re an investor. You see all kinds of businesses coming along. Do you have an investment thesis like, “I am really interested in artificial intelligence applied to enterprises”? What is your thesis?

We refer to our thesis as—not only do we have a thesis, but I think we have a good name to capture it—“Intelligent, Connect and Protect,” wherein our firm strategy is to invest in startups that are really disrupting, in a positive manner, and revolutionizing the enterprise—from sales tech and martech, to pure IT and data; around platforms, be those software platforms or robotics and the like; as well as cyber security and infrastructure.

So that first part, around enterprise and platforms, is the “Connect” world and then the cyber security and the infrastructure is the protection of that ecosystem. The reason why we don’t just call it “Connect and Protect” is because with every single startup that we invest in, core to our strategy is the utilization, or taking advantage, of artificial intelligence, so that is the “Intelligent” part in describing, or in capturing our thesis.

Said differently, we fundamentally believe that if a technology startup, in this day and age, is not leveraging some form of machine learning, some facet of AI, it’s putting itself at a disadvantage from day one. Put more directly, it becomes legacy from the get-go, because from a performance point of view those legacy products, or products without any kind of learning and AI, just won’t be able to keep up and outperform their peers that do.

You’re based in Boston. Are you doing most of your investing on the East Coast?

For the most part, correct. Yes. East Coast, and in other pockets of opportunity where our strategy holds. There are some interesting things in areas like Atlanta with security, even in certain parts of Europe like London, Berlin, Munich, etcetera, but yes.

Is AI being used for different things on the East Coast than what we think of in Silicon Valley? Can you go into that a little more? Where do you see pockets that are doing different things?

I think AI is a massive wave, and I think we would be in our own bubble if we thought that it was divided by certain coasts. Where I think it manifests itself differently, however—and I think it’s impacting at a global level to be honest rather than in our own microcosms—is where you see a difference in the concentration of the talent pool around AI, and especially deep learning. Because, keep in mind, the notion of specializing in machine learning or visual cognition, and particularly deep learning, which is the best example, didn’t really exist before 2012. We talk a lot about data scientists, but the true data scientists and machine learning experts are very, very hard to come by, because the field is, in many ways, driven by the explosion in data, and then by the maturity that the whole deep learning field is achieving to be commercializable, for the techniques to be used in real products. It’s all very new, only existing in the last five to—if you want to be generous—ten years.

From that perspective, where talent is concentrated makes a difference. To come back to how, maybe, the East Coast compares, I think we will see AI companies across the board. I’m very optimistic, in that I think we have quite a bit of concentration of AI through the universities on the east coast. I think of MIT, Carnegie Mellon, and Cornell; and what we’re seeing come out of Harvard and BU on the NLP side.

Across the universities, there are very, very deep pockets of talent, and I think that manifests itself both with the number and high quality of AI-enabled products and startups that we’re seeing get launched, but also with, what one would call, the “incumbents” such as Facebook, Amazon, Google, Uber, and the list goes on. If you look closely at where their AI teams are—even though almost all the companies I just mentioned are headquartered in the Valley and, in the case of Amazon, in Seattle—their AI talent is concentrated on the East Coast; probably most notably, Facebook’s AI group is headquartered in New York. So, combine that talent concentration with the market that our strategy, in particular, focuses on—the enterprise—where the East Coast has always had, and continues to have, an advantage, and I think it’s an interesting moment in time.

I assume with the concentration of government on the East Coast and finance on the East Coast, that you see more technologies like security and those sorts of things. Specifically, with security, there’s been this game that’s gone back and forth for thousands of years between people who make codes, and people who break them. And nobody’s ever really come to an agreement about who has the harder job. Can you make an unbreakable code, and can it be broken? Do you think AI helps those who want to violate security, or those who want to defend against those violations, right now?

I think AI will play an important role in defending and securing the ecosystem. The reason I say that is because, in this day and age, the exploding number of devices and pervasive connectivity everywhere—translated into cyber security lingo, an increase in the number of endpoints and areas of vulnerability, whether at the network and device level or at the data and identity level—have made us a lot more vulnerable, which is sort of the paradigm we live in.

Where I think AI and machine learning can be true differentiators is that not only can they be leveraged, again, for the various software solutions to continuously learn, but also on the predictive side they can point out where a vulnerability attack is being predicted before it actually takes place. There are certain patterns that help the enterprise to hone in on the vulnerability—from assessment to time of attack, at or during the attack, and then post attack. I do think that AI is a really meaningful differentiator for cyber security.

You alluded, just a moment ago, to the lack of talent; there just aren’t enough people who are well-versed in a lot of these topics. How does that shake out? Do you think that we will use artificial intelligence to make up for shortage of people with the skills? Or, do you think that universities are going to produce a surge of new talent coming in? How do we solve that? Because you look out your window, and almost everything you see, you could figure out how we could use data to study that and make it better. It’s kind of a blue ocean. What do you think is going to happen in the talent marketplace to solve for that?

AI eventually will be a layer, you’re absolutely right. From that perspective, I cannot come up with an area where AI will not play a role, broadly put, for the foreseeable future and for a long time in the future.

In terms of the talent challenge, let me address your question twofold. The talent shortage challenge that we have right now stems from the fact that it’s a relatively new field, or the resurgence of the field, and the ability to now actually deploy it in the real world and commercialize; this is what’s driving this demand. It’s the demand that has spurred it, and of course, the supply for that adjustment to take place requires talent, if I can think of it in that manner, and it’s not there. It’s a bit of a matter of market timing at one level. For sure, we will see many more students enter the field, many more students specialize and get trained in machine learning.

Then the real question becomes will part of their functions be automated? Will we need fewer humans to perform the same functions, which I think was the second part of your question if I understood it correctly?

Yes.

I think we’re in a phase of augmentation. And we’ve seen this in the past. Think about this, Byron: how did developers code, going back ten to fifteen years ago? Largely, in different languages, but largely, from the ground up. How do they code today? I don’t know of any developer who doesn’t use the tools available to get a quick spin up, and to ramp up quickly.

AI and machine learning are no different. Not every company is going to build their own neural net. Quite the opposite. A lot of them will use what’s open source and available out there in the market, or what’s commercialized for their needs. They might do some customization on top, and then they will focus on the product they’re building.

The fact that you will see part of the machine learning function that’s being performed by the data scientists be somewhat automated should come as no surprise, and that has nothing to do with AI. That has to do with driving efficiencies and getting tools and having access to open source support, if you will.

I think down the road—where AI plays a role both in augmentation and in automation—we will see definitional changes to what it means to be in a certain profession. For example, I think a medical doctor of the future might look, from a day-to-day activity point of view, very differently than what we perceive a doctor’s role to be—from interaction to what they’re trained at. The fact that a machine learning expert and a data scientist—which by the way are not the same thing but for the sake of argument, I’m using them interchangeably—are going to use tools, and not start from scratch but are going to leverage some level of automation and AI learning is par for the course.

When I give talks on these topics, especially on artificial intelligence, I always get asked the question, “What should I, or what should my children, study to remain employable in the future?”—and we’ll talk about that in a minute, about how AI kind of shakes up all of that.

There are two kind of extreme ends on this. One school of thought says everyone in school should learn how to code, everyone. It’s just like one of the three R’s, but it starts with a C. Everyone should learn to code. And then Mark Cuban, at South by Southwest here in Austin, said that the first trillionaires are going to be from AI companies because it offers the ability to make better decisions, right? And he said if he were coming up today, he would study philosophy, because it’s going to be that kind of thinking that allows you to use these technologies, and to understand how to apply them and whatnot.

On that spectrum of everyone should code, or no, we might just be making a glut of people to code, when what we really need are people to think about how to use these technologies and so forth, what would you say to that?

I have a 4-year-old daughter, so you better believe that I think about this topic quite a bit. My view is that AI is an enabler. It’s a tool for us as a society to augment and automate the mundane, and give us more ability and more room for creativity and different thinking. I would hope to God that the students of the future will study philosophy, they will study math, they will study the arts, they will study all the sciences that we know, and then some. Creativity of thinking, and diversity of thinking will remain the most precious asset we have, in my view.

I do think that, much like children today study the core hard sciences of math and chemistry and biology as well as literature, part of the core curriculum in the future will probably be some form of advanced data statistics, or introductory machine learning, or some level of computer science. We will see some technology training that becomes core, but I think that is a very, very, very different discussion than, “Everybody should study computer science or, looking forward, everybody should be a roboticist or machine learning expert or AI expert.” We need all the differentiation in thinking that we can get. Philosophy does matter, because what we do today shapes the present and society in the future.

Back to the talent question, to your point about someone who is well-versed in machine learning—which is different than data science, as you were saying—do you think those jobs are very difficult, and we’re always going to have a shortage of them because they’re just really hard? Or, do you think it’s just a case that we haven’t really taught them that much and they’re not any harder than coding in C or something? Which of those two things do you think it is?

I think it’s a bit more the latter than the former, that it’s a relatively new field. Yes, math and quants matter in this area, but it’s a new field. It will be talent that has certain predisposition around, like I said, math and quants, yes for sure. But, I do think that the shortage that we experience has a lot more to do with the newness of the field rather than the lack of interest or the lack of qualified talent or lack of aptitude.

One thing, when people say, “How can I spot a place to use artificial intelligence in my enterprise?” one thing I say is find things that look like games. Because every time AI wins in chess, and beats Ken Jennings in Jeopardy and Lee Sedol in Go—the games are really neat because they are these very constrained universes with definable rules and clear objectives.

So, for example, you mentioned HR in your list of all the things it was going to affect, so I’ll use that one. When you have a bunch of resumes, and you’ve hired some people that get great performance reviews, and some people that don’t, and you can think of them as points, or whatever—and you can then look at it as a big game, and you can then try to predict, you know? You can go into each part of the enterprise and say, “What looks like a game here?” Do you have a rule like that or just a guiding metaphor in your own mind? Because, you see all these business plans, right? Is there something like that, that you’re looking for?

There were several questions embedded in this. Let me see if I can decouple a couple of them. I think any area that is data-driven, any facet of the enterprise that is data-driven or where there is information, is somewhere you can leverage learning and narrow AI for predictives, so you used some of the keywords. Are there opportunities for optimization? Are there areas where analytics are involved where you can move away from basic statistical models, and can start leveraging AI? I think where there is room for efficiency and automation, you can leverage it. It’s hard not to find an area where you can leverage it. The question is where can you create the most value?

For example, if you are on the forefront of an enterprise on the sales side, can you leverage AI? Of course, you can—not all prospective customers are created equal, there are better funnels, you can leverage predictives; the more and better data you have, the better the outcomes. At the end of the day, your neural net will only perform as well as the data you put in: junk in, junk out. That’s one facet.

If you’re looking at the marketing and technology side, think about how one can leverage machine learning and predictives around advertising, particularly on the programmatic side, so that you’re personalizing your engagement, in whichever capacity, with your consumer or your buyer. We can go down the list, Byron. I think the better question is which are the lower-hanging fruits where I can start taking advantage of AI right away, and which ones will I wait on, rather than do I have any areas at all? If the particular manager or business person can’t find any areas, I think they’re missing both the big picture and the day-to-day execution.

I remember in the ‘90s when the consumer web became a big thing, and companies had a web department and they had a web strategy, and now that’s not really a thing, because the internet is part of your business. Do you think we’re like that with artificial intelligence, where it’s siloed now, but eventually, we won’t talk about it the way we’re talking about it now?

I do think so. I often get asked the very same question, “How do I think AI will shape up?” and I think AI will be a layer much like the internet has become a layer. I absolutely do. I think we will see tools and capabilities that will be ever pervasive.

Since AIs are only as good as the data you train them on, does it seem monopolistic to you that certain companies are in a place where they can constantly get more and more and more data, which they can therefore use to make their businesses stronger and stronger and stronger, and it’s hard for new entrants to come in because they don’t have access to the data? Do you think that data monopolies will become kind of a thing, and we’ll have to think about how to regulate them or how to make them available, or is that not likely?

I think the possession of data is, for sure, a barrier to entry in the market, and I do think that the current incumbents, probably more than we’ve ever seen before, have built this barrier to entry by amalgamating the data. How it will shake out… First of all, two thoughts: one, even though they have amassed huge amounts of data with this whole pervasive connectivity, and devices that stay connected all the time, even the large incumbents are only scratching the surface of the data we are generating, and the growth that we’ll continue to see on the data side. So, even though it feels oligopoly-like, maybe—not quite monopolistic—that the big players have so much data, I think we’re generating even more data going forward. So that’s sort of at the highest level.

I do think that, particularly on the consumer side, something needs to be done around customers taking control of their data. I think brands and advertisers have been squatting on consumer data with very little in return for us. I think, again, one can leverage AI in predictives, in that regard, to compensate—whether it’s through an experience or in some other form—consumers for their personal private data being used. And, we probably need some form of regulation, and I don’t know if it’s at the industry standard level, or with more regulatory bodies involved.

Not sure if you follow Sir Timothy Berners-Lee, who invented the web, but he does talk a lot about data decentralization. I think there is something quite substantive in his statements around decentralizing the web and all the data and giving consumers a say. I think we’re seeing a bit of a groundswell in that regard. How it will manifest itself, I’m not quite sure, but I do think that the discussion around data will remain very relevant and become even more important as the amount of data increases, and as it becomes critical as a barrier to entry for future businesses.

With regard to privacy in AI, do you think that we are just in a post-privacy world? Because so much of what you do is recorded one way or the other that data just exists and we’ll eventually get used to that. Or do you think people are always going to insist on the protections that you’re talking about, and ways to guarantee their anonymity; and that the technology will actually be used to help promote privacy, not to wear it down?

I think we haven’t given up on privacy. I think the definition of privacy might have changed, especially with the millennials and the social norms that they have been driving, and, largely, the rest of the population has adopted. I’d say we have a redefinition of privacy, but for sure, we haven’t given up on it; even the younger generations who often get accused of doing so. And you don’t need to take my word on it, look at what happened with Snap. Basically, in the early days, it was really almost tweens but let’s say it was teenagers who were on Snapchat and what they were doing was “borderline misbehavior” because it was going to go away, it wouldn’t leave a footprint. The value prop being that it disappears, so your privacy, your behavior, does not become exposed to the broader world. It mattered, and, in my view, it was a critical factor in the growth that the company saw.

I think you’d be hard pressed to find people, I’m sure they exist but I think they are in the minority, that would say, “Oh, I don’t care. Put all of my data, 24/7, let the world know what I’m up to.” Even on the exhibitionist side, I think there’s a limit to that. We care about privacy. How we define it today, I suspect, is very different than how we defined it in the past and that is something that’s still a bit more nebulous.

I completely agree with that. My experience with young people is they are onto it, they understand it better and they are all about it. Anyway, I completely agree with all of that.

So, what about European efforts with regard to the “right to know why”? If an artificial intelligence makes a decision that impacts your life—like gives you a loan or doesn’t—you have the right to know how that conclusion was made. How does that work in a world of neural nets where there may not be a why that’s understandable, kind of, in plain English? Do you think that that is going to hold up the development of black box systems, or that that’s a passing fad? What are your thoughts on that?

I think Europe has always been on the side of protecting consumers. We were just talking about privacy, and look at what they are doing with GDPR, and what’s coming to market from the data point of view on the topic we were just wrapping up. I think, as we gain a better understanding of AI and as the field matures, if we hide behind “We don’t quite know how the decision was made,” even if we may not fully comprehend it, or behind “Oh, it’s hard to explain and people can’t understand it,” I think at some point it becomes a cop-out. I don’t think we need to educate everyone on how neural nets and deep learning work, but I think you can talk about the fundamentals of what the drivers are and how they interact with each other, and at a minimum, you can give the consumer some basic level of understanding as to where they probably outperformed or underperformed.

It reminds me of how, in tech, we used to use acronyms when talking to each other, making everybody else feel like they were less intelligent than the rest of the world. I don’t think we need to go into the science of artificial intelligence and machine learning to help consumers understand how decisions were made. Because guess what? If we can’t explain it to the consumer, the person on the other side who’s managing the relationship will not understand it themselves.

I think you’re right, but, if you ask Google, “Why did this page come number one for this search?” the answer, “We don’t know,” is perfectly understandable. It’s six hundred different algorithms that go into how they rank pages—or whatever the number is, it’s big. So, how can they know why this is page number one and that is page number two?

They may not know fully, or it may take some effort to drill in specifically as to why, but at some level they can tell you what some of the underlying drivers were behind the ranking or how the ranking algorithms took place etcetera, etcetera. I think, Byron, what you and I are going back and forth on is, in my view, it’s a level of granularity question, rather than can they or can they not. It’s not a yes or a no, it’s a granularity question.

There’s a lot of fear in the world around the effect that artificial intelligence is going to have on people, and one of the fear areas is the effect on jobs. As you know, there kind of are three narratives. One narrative is that there are some people who don’t have a lot of training in things that machines can’t do, and the machines are eventually going to take their jobs, and that we’ll have some portion of the population that’s permanently unemployed, like a permanent Great Depression.

Then there’s a school of thought that says, “No, no, no. Everybody’s replaceable by a machine, that eventually, they’re going to get to a point where they can learn something new faster than a human, and then we’re all out of work.”

And then there’s a third group that says, “No, no, no, we’re not going to have any unemployment because we’ve had disruptive technologies: electricity, replacing animals with machines, and steam; all these really disruptive technologies, and unemployment never spiked because of those. All that happens is people learned to use those tools to increase their own productivity.”

My question to you is, which of those three narratives, or is there a fourth one, do you identify with?

I would say I identify only in part with the last narrative. I do think we will see job displacement. I do think we will see job displacement in categories of workers that we would have normally considered highly-skilled. In my view, what’s different about the paradigm we are in vis-à-vis, let’s say, the Industrial Revolution, is that it is not only the lowest-trained workers or the highly-specialized workers—if you think about artisanal-type workers back in the day—that get displaced out of their roles; through automation they were replaced by machines in the Industrial Revolution, and here it is by technology and the AI paradigm.

I think what’s tricky with the current paradigm is that the middle class and the upper middle class get impacted as much as the less-trained, low-skilled workers. There will be medical doctors, there will be attorneys, there will be highly-educated parts of the workforce whose jobs—some of the jobs may be done away with—will, in large part, be redefined. And, very analogous to the discussion we were just having about the shortage of machine learning experts, we’ll see older generations who are still seeking to be active members of the workforce put out of the labor market, or no longer qualified and requiring new training, and it will be a challenge for them to gain the training to be as high a performer as someone who has been learning the particular skill, in medicine for example, in an AI paradigm from the get-go.

I think we’ll see a shift in job definitions, and a displacement of meaningful chunks of the highly-trained workforce, and that will have significant societal consequences as well as economic consequences. Which is why I think a form of guaranteed basic income is a worthy discussion, at least until that generation of workers get settled and the new labor force that’s highly-trained in an AI-type of paradigm comes into play.

I also think there will be many, many, many new jobs and professions that will be created that we have yet to think about or even imagine as a result. I do not think that AI is a net negative in terms of creating entire unemployment or lower employment. It’s not a net negative. I think—McKinsey and many, many others have done studies on this—in the long term, we’ll probably see more employment than not created as a result of AI. But, at any point in time, as we look at the AI disruption and adoption over the next few decades, I think we will see moments of pain and meaningful pain.

That’s really interesting because, in the United States, as an example, since the industrial revolution, unemployment has been between five and nine percent, without fail five and nine percent, except the Great Depression which nobody said was caused by technology. If you think about an assembly line, an assembly line is AI. If you were making cars one at a time in a garage, and then all of a sudden, Henry Ford shows up and he makes them a hundred at a time and sells them for a tenth the price and they’re better, that has got to be like, “Oh my gosh, this AI, this technology just really upset this enormous amount of people,” and yet you never see unemployment go above nine percent in this country.

I will leave the predictions of the magnitude of the impact to the macroeconomists; I will focus on startups. But let me stick with that example: so you have artisanal shops and sewing by hand, and then the machine comes along and the factory line, and now it’s all automated, and you and others are displaced. So, for every ten of you who were working, one is now on the factory line and nine are finding themselves out of a position. That was the paradigm I was describing a minute ago with doctors and lawyers and other professions, that a lot of their function will become automated or replaced by AI. But then, it’s also the case that now their children or their grandchildren are studying outer space, or are going into astronomy and other fields that we might have, at a folklore level, thought about, but never expected that we’d get there; so, new fields emerge.

The pain will be felt, though. What do you do with the nine out of ten who are, right there and then, out of a position? In the long term, in an AI paradigm, we’ll see many, many more professions get created. It’s just about where you get caught in the cycle.

It’s true. In ’95, you never would have thought, “If you just connect a bunch of computers together with a common protocol and make the web, you’re going to have Google and eBay and Etsy.”

Let’s talk about startups for a minute. You see a lot of proposals, and then you make investments, and then you help companies along. What would you say are the most common mistakes that you’re seeing startups make, and do you have general advice for portfolio companies?

Well, my portfolio companies get the advice in real time, but I think, especially for AI companies—to go back to how you opened this discussion, which was referencing a byline I had done for Gigaom—if a company truly does have artificial intelligence, show it. And it’s pretty easy to show. You show how your product leverages various learning techniques, you show who the people on your team are that are focusing on machine learning, and also how you, the founder, whether you are a technical founder or not, understand the underpinnings of AI and of machine learning. I think that’s critical.

So many companies are calling themselves something-something-dot-AI, and it’s very, very similar and analogous to what we saw with big data. If you remember, seven to ten years ago, every company was big data. Every company is now AI, because it’s the hot buzzword. So, rising above the noise while taking advantage of the wave is important, but meaningfully so, because it’s valuable to your business, and because, from the get-go, you’re taking advantage of machine learning and AI, not because it’s the buzzword of the day that you think might get you money. The fact of the matter is, for those of us who live and breathe AI and startups, we’ll cut through the noise fairly quickly; pattern recognition and the number of deals we see in any given week are such that the true AI capabilities will stand out. That’s one piece.

I do think, also, that for the companies and founders that truly are leveraging neural nets, truly are getting the software or hardware—whatever their product might be—to outperform, the dynamics within the companies have changed. Because we don’t just have the technology team consisting of the developers with the link to the product people; we now have this third leg, the machine learning or data scientist people. So, how is the product roadmap being driven? Is it the product people driving it, or is the machine learning talent coming up with models to help support it, or are they driving it, with product turning it into a roadmap and technology, the developers, implementing it? It’s a whole new dynamic among these various groups.

There’s a school of thought, in fact, that says, “Machine learning experts, who’s that? It’s the developers who will have machine learning expertise, they will be the same people.” I don’t share the view. I think developers will have some level of fluency in machine learning AI, but I think we will have distinct talent around it. So, getting the culture right amongst those groups makes a very, very big difference to the outcome. I think it’s still in the making, to be honest.

This may be an unanswerable question, because it’s too vague.

Lucky me.

I know.

Go ahead.

Two business plans come across your desk, and one of them is a company that says, “We have access to data that nobody else has, and we can use this data to learn how to do something really well,” and the other one says, “We have algorithms that are so awesome that they can do stuff that nobody else knows how to do.” Which of those do you pick up and read first?

Let’s merge them. Ideally, you’d like to have both the algorithms, or the neural nets, and the data. If you really force me to pick one, I’ll pick the data. I think there are enough tools out there, enough TensorFlows or whatnot in the market and in open source, that you could probably work with those and build on top of them. Data becomes the big differentiator.

I think of data, Byron, today as we used to think of patents back in the day. The role of patents is an interesting topic because, with execution, they’ve taken second or third seat as a barrier to entry. But, back ten, fifteen years ago, patents mattered a lot more. I think data can give you that kind of barrier to entry and even more so. So, I pick data. It is an answerable question; I’ll pick big data.

Actually, my very next question was the role of patents in this world. Because the world changes so quickly, plus you have to disclose so much. Would you advise people to keep them as trade secrets? Or, just, how do you think that companies who develop a technology should protect and utilize it?

I think your question depends a bit on what facet of technology are we talking about. In the life sciences, they still matter quite a bit, which is an area that I don’t know as much about, for sure. I think, in technology, their role has diminished, although still relevant. I cannot think of a company that became big and a market leader because they had patents. I think they are an important facet, but it is not the make-all or break-all in terms of must-have. In my view, they are a nice to have.

I think where one pauses is if their immediate competitor has a healthy body of patents; then you think a bit more about that. As far as the tradeoff between patents and trade secrets, I think there is a moment in time when one files a patent, especially if secrecy matters. At the end of the day though—and this may be ironic given that we’re talking about artificial intelligence startups—much like any other facet of our lives, what matters is excellence of execution, and people. People can make or break you.

So, when you ask me about the various startups that I see, and talk about the business plans, I never think of them as “the business plan.” I always think of them in the context of, “Who are the founders? Who are the team members, the management team?” So, team first. Then, market timing for what they are going after, because you could have the right execution or the right product, but the wrong market timing. And then, of course, the question of what problem are they solving, and how are they taking advantage of AI. But, people matter. To come back to your question, patents are one more area that a startup can build defensibility but not the end-all and be-all by any stretch, and they have a diminished role, in fact.

How do you think startups have changed in the last five or ten years? Are they able to do more early? Or, are they demographically different—are they younger or older? How do you think the ecosystem evolves in a world where we have all these amazing platforms that you can access for free?

I think we’ve seen a shift. Earlier, you referenced the web, and with the emergence of the web, back in 1989, we saw digital and e-commerce and martech; and entire new markets get created. In that world—what I’ll call not just pure technology businesses, but tech-enabled businesses—we saw a shift both in younger demographics and startups founded by younger entrepreneurs, but also more diversity in terms of gender and background as well, in that not everybody needed to have a computer science degree or an engineering degree to be able to launch a tech or a tech-enabled company.

I think that became even more prevalent and emphasized in the more recent wave that we’re just on the completion side of with social-mobile. I mean, the apps, that universe and ecosystem, it’s two twenty-year-olds, right? It’s not the gray-headed three-time entrepreneur. So, we absolutely saw a demographic shift. In this AI paradigm, I think we’ll see a healthy mixture. We’ll see the researcher and the true machine learning expert who’s not quite twenty but not quite forty either, so, a bit more maturity. And then we’ll see the very young cofounder or the very experienced cofounder. I think we’ll see a mix of demographics and age groups, which is the best. Again, we’re in a business of diversity of thought and creativity. We’re looking for that person who’s taking advantage of the tools and innovation and what’s out there to reimagine the world and deliver a new experience or product.

I was thinking it’s a great time to be a university professor in these topics because, all of a sudden, they are finding themselves courted right and left because they have long-term deep knowledge in what everyone is trying to catch up on.

I would agree, but keep in mind that there is quite a bit of a chasm between teaching a topic and actually commercializing it. So I think the professors who are able to cross the chasm—not to sound too Geoffrey Moore-ish—are the ones that, yes, are in the right field at the right moment in time. Otherwise, it’s their students, the talent that is knowledgeable enough, those PhDs that don’t go into academia but are actually going into commercialization, execution, and implementation; that’s the talent that’s in high demand.

My last question is, kind of, how big can this be? If you’re a salesperson, and you have a bunch of leads, you can just use your gut, and pick one, and work that one, or you have data that informs you and makes you better. If you’re an HR person, you hire people more suited to the job than you would have before. If you’re a CEO, you make better decisions about something. If you’re a driver, you can get to the place quicker. I mean, when you add all of that up across an entire world of inefficiency… So, you kind of imagine this world where, on one end of the spectrum, we all just kind of stumble through life like drunken sailors on shore leave, randomly making decisions based on how we feel; and then you think of this other world where we have all of this data, and it’s all informed, and we make the best decisions all the time. Where do you think we are? Are we way over at the wandering around, and this this is going to get us over to the other side? How big of an impact is this? Could artificial intelligence double GNP in the United States? How would you say how big can it be?

Fortunately, or unfortunately, I don’t know, but I don’t think we live in a binary world. I think, like everything else, it’s going to be a matter of shades. I think we’ve driven productivity and efficiency, historically, to entirely new levels, but I don’t think we have any more free time, because we find other ways to occupy ourselves even in our roles. We have mobile phones now, we have—from a legacy perspective—laptops, computers, and whatnot; yet, somehow, I don’t find myself vacationing on the beach. Quite the contrary, I’m more swamped than ever.

I think we have to be careful about—if I understood your question correctly—transplanting technology into, “Oh, it will take care of everything and we’ll just kind of float around a bit dumber, a bit freer, and whatnot.” I think we’ll find different ways to reshape societal norms, not in a bad way, but in a, “What constitutes work?” way, and possibly explore new areas that we didn’t think were possible before.

I think it’s not necessarily about gaining efficiency, but I think we will use that time, not in an unproductive or leisurely way, but to explore other markets, other facets of life that we may or may not have imagined. I’m sorry for giving you such a high-level answer, and not making it more concrete. I think productivity from technology has been something that’s been, as you well know, very hard to measure. We know, anecdotally, that it’s had an impact on measured activity, but there are entire groups of macroeconomists who not only cannot measure it, but don’t believe it has improved productivity.

It will have a fundamental transformative impact, whether we’re able to measure it—I know you defined it as GNP, but I’m defining it from a productivity point of view—or not remains to be seen. Some would argue, that it’s not productive, but I would throw the thought out there, that traditional methodologies of measuring productivity do not account for technological impact. Maybe we need to look at how we’re defining productivity. I don’t know if I answered your question.

That’s good. The idea that technology hasn’t increased our standard of living, I don’t think… I live a much more leisurely life than my great grandparents, not because I work any harder than them, but because I have technology in my life, and because I use that technology to make me more productive. I know the stuff you’re referring to where it’s like, “We’ve got all these computers in the office and worker productivity doesn’t seem to just be shooting through the roof.” I don’t know. Let’s leave it there.

Actually, I do have a final question. You said you have a four-year-old daughter, are you optimistic overall about the world she’s going to grow up in with these technologies?

My gosh! We’re going into a shrink session.

No, I mean are you an optimist or a pessimist about the future?

Apparently, I’ve just learned—in the spirit of sharing information with you and all your listeners—that my age group falls into something called the Xennial where we are very cynical like Generation X, but also optimists like the Millennials. I’m not sure what to make of that. I would call it an interesting hybrid.

I am very optimistic about my daughter’s future, though. I think of it as, today’s twentysomethings are digital natives, and today’s ten-year-olds and later are mobile natives. My daughter is going to be an AI native, and what an amazing moment in time for her to be living in this world. The opportunities she will have and the world she will explore on this planet and beyond, I think, will be fascinating. I do hope that somewhere in the process, we manage to find a bit more peace, and not destroy each other. But, short of that, I think I’m quite optimistic about the future that lies ahead.

Alrighty, well let’s leave it at that. I want to thank you for an absolutely fascinating hour. We touched on so many things and I just thank you for taking the time.

My pleasure. Thanks again for having me.

Byron explores issues around artificial intelligence and conscious computers in his upcoming book The Fourth Age, to be published in April by Atria, an imprint of Simon & Schuster. Pre-order a copy here.

Voices in AI – Episode 21: A Conversation with Nikola Danaylov

In this episode, Byron and Nikola talk about singularity, consciousness, transhumanism, AGI and more.




Byron Reese: This is Voices in AI, brought to you by Gigaom. I’m Byron Reese. Today our guest is Nikola Danaylov. Nikola started the Singularity Weblog, and hosts the wildly popular singularity.fm podcast. He has been called the “Larry King of the singularity.” He writes under the name Socrates, or, to the Bill & Ted fans out there, So-crates. Welcome to the show, Nikola.

Nikola Danaylov: Thanks for having me, Byron, it’s my pleasure.

So let’s begin with, what is the singularity?

Well, there are probably as many definitions and flavors as there are people or experts in the field out there. But for me, personally, the singularity is the moment when machines first catch up and eventually surpass humans in terms of intelligence.

What does that mean exactly, “surpass humans in intelligence”?

Well, what happens to you when your toothbrush is smarter than you?

Well, right now it’s much smarter than me on how long I should brush my teeth.

Yes, and that’s true for most of us—how long you should brush, how much pressure you should exert, and things like that.

It gives very bad relationship advice, though, so I guess you can’t say it’s smarter than me yet, right?

Right, not about relationships, anyway. But about the duration of brush time, it is. And that’s the whole idea of the singularity, that, basically, we’re going to expand the intelligence of most things around us.

So now we have watches, but they’re becoming smart watches. We have cars, but they’re becoming smart cars. And we have smart thermostats, and smart appliances, and smart buildings, and smart everything. And that means that the intelligence of the previously dumb things is going to continue expanding, while unfortunately our own personal intelligence, or our intelligence as a species, is not.

In what sense is it a “singularity”?

Let me talk about the roots of the word. The origin of the word singularity comes from mathematics, where it basically is a problem with an undefined answer, like five divided by zero, for example. Or in physics, where it signifies a black hole. That’s to say a place where there is a rupture in the fabric of time-space, and the laws of the universe don’t hold true as we know them.

In the technological sense, we’re borrowing the term to signify the moment where humanity stops being the smartest species on our planet, and machines surpass us. And therefore, beyond that moment, we’re going to be looking into a black hole of our future, because our current models fail to provide sufficient predictions as to what happens next.

So everything that we have already is kind of going to have to change, and we don’t know which way things are going to go, which is why we’re calling it a black hole. Because you cannot see beyond the event horizon of a black hole.

Well if you can’t see beyond it, give us some flavor of what you think is going to happen on this side of the singularity. What are we going to see gradually, or rapidly, happen in the world before it happens?

One thing is the “smartification” of everything around us. So right now, we’re still living in a pretty dumb universe. But as things come to have more and more intelligence, including our toothbrushes, our cars—everything around us—our fridges, our TVs, our computers, our tables, everything. Then that’s one thing that’s going to keep happening, until we have the last stage where, according to Ray Kurzweil, quote, “the universe wakes up,” and everything becomes smart, and we end up with different things like smart dust.

Another thing will be the merger between man and machine. So, if you look at the younger generation, for example, they’re already inseparable from their smartphones. It used to be the case that a computer was the size of a building—and by the way, those computers were even weaker in terms of processing power than our smartphones are today. Even the Apollo program used a much less powerful machine to send astronauts to the moon than what we have today in our pockets.

However, that change is not going to stop there. The next step is that those machines are going to actually move inside of our bodies. So they used to be inside of buildings, then they went on our body, in our pockets, and are now becoming what’s called “wearable technology.” But tomorrow it will not be wearable anymore, because it will be embedded.

It will be embedded inside of our gut, for example, to monitor our microbiome and to monitor how our health is progressing; it will be embedded into our brains even. Basically, there may be a point where it becomes inseparable from us. That in turn will change the very meaning of the definition of being human. Not only at the sort of collective level as a species, but also at the personal level, because we are possibly, or very likely, going to have a much bigger diversification of the understanding of what it means to be a human than we have right now.

So when you talk about computers becoming smarter than us, you’re talking about an AGI, artificial general intelligence, right?

Not necessarily. The toothbrush example is artificial narrow intelligence, but as it gets to be smarter and smarter there may be a point where it becomes artificial general intelligence, which is unlikely, but it’s not impossible. And the distinction between the two is that artificial general intelligence is equal or better than human intelligence at everything, not only that one thing.

For example, a calculator today is better than us in calculations. You can have other examples, like, let’s say a smart car may be better than us at driving, but it’s not better than us at Jeopardy, or speaking, or relationship advice, as you pointed out.

We would reach artificial general intelligence at the moment when a single machine will be able to be better at everything than us.

And why do you say that an AGI is unlikely?

Oh no, I was saying that an AGI may be unlikely in a toothbrush format, because the toothbrush requires only so many particular skills or capabilities, only so many kinds of knowledge.

So we would require the AGI for the singularity to occur, is that correct?

Yeah, well that’s a good question, and there’s a debate about it. But basically the idea is that anything you can think of which humans do today, that machine would be equal or better at it. So, it could be Jeopardy, it could be playing Go. It could be playing cards. It could be playing chess. It could be driving a car. It could be giving relationship advice. It could be diagnosing a medical disease. It could be doing accounting for your company. It could be shooting a video. It could be writing a paper. It could be playing music or composing music. It could be painting an impressionistic or other kind of piece of art. It could be taking pictures equal or better than Henri Cartier-Bresson, etc. Everything that we’re proud of, it would be equal or better at.

And when do you believe we will see an AGI, and when would we see the singularity?

That’s a good question. I kind of fluctuate a little bit on that. Depending on whether we have some kind of general sort of global-scale disaster like it could be nuclear war, for example—right now the situation is getting pretty tense with North Korea—or some kind of extreme climate-related event, or a catastrophe caused by an asteroid impact; falling short of any of those huge things that can basically change the face of the Earth, I would say probably 2045 to 2050 would be a good estimate.

So, for an AGI or for the singularity? Or are you, kind of, putting them both in the same bucket?

For the singularity. Now, we can reach human-level intelligence probably by the late 2020s.

So you think we’ll have an AGI in twelve years?

Probably, yeah. But you know, the timeline, to me, is not particularly crucial. I’m a philosopher, so the timeline is interesting, but the more important issues are always the philosophical ones, and they’re generally related to the question of, “So what?” Right? What are the implications? What happens next?

It doesn’t matter so much whether it’s twelve years or sixteen years or twenty years. I mean, it can matter in the sense that it can help us be more prepared, rather than not, so that’s good. But the question is, so what? What happens next? That’s the important issue.

For example, let me give you another crucial technology that we’re working on, which is life extension technology, trying to make humanity “amortal.” Which is to say we’re not going to be immortal—we can still die if we get run over by a truck or something like that—but we would not be likely to die from the general causes of death that we see today, which are usually old-age related.

As an individual, I’m hoping that I will be there when we develop that technology. I’m not sure I will still be alive when we have it, but as a philosopher what’s more important to me is, “So what? What happens next?” So yeah, I’m hoping I’ll be there, but even if I’m not there it is still a valid and important question to start considering and investigating right now—before we are at that point—so that we are as intellectually and otherwise prepared for events like this as possible.

I think the best guesses are, we would live to about 6,750 years. That’s how long it would take for some, you know, Wile E. Coyote kind of piano-falling-out-of-the-top-floor-of-a-building-and-landing-on-you thing to happen to you, actuarially speaking.
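[A quick back-of-the-envelope check on that figure, added for clarity; the rate below is an illustrative assumption, not a number from the conversation. With a roughly constant annual probability p of dying by accident, expected lifespan is about 1/p, so an accidental-death rate of about 1 in 6,750 per year works out to an expected wait of roughly 6,750 years before that falling piano finds you.]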

So let’s jump into philosophy. You’re, of course, familiar with Searle’s Chinese Room question. Let me set that up for the listeners, and then I’ll ask you to comment on it.

So it goes like this: There’s a man, we’ll call him the librarian. And he’s in this giant room that’s full of all of these very special books. And the important part, the man does not speak any Chinese, absolutely no Chinese. But people slide him questions under the door that are written in Chinese.

He takes their question and he finds the book which has the first symbol on the spine, and he finds that book and he pulls it down and he looks up the second symbol. And when he finds the second symbol and it says go to book 24,601, and so he goes to book 24,601 and looks up the third symbol and the fourth and the fifth—all the way to the end.

And when he gets to the end, the final book says copy this down. He copies these lines, and he doesn’t understand what they are, slides it under the door back to the Chinese speaker posing the question. The Chinese speaker picks it up and reads it and it’s just brilliant. I mean, it’s absolutely over-the-top. You know, it’s a haiku and it rhymes and all this other stuff.

So the philosophical question is, does that man understand Chinese? Now a traditional computer answer might be “yes.” I mean, the room, after all, passes the Turing test. Somebody outside sliding questions under the door would assume that there’s a Chinese speaker on the other end, because the answers are so perfect.

But at a gut level, the idea that this person understands Chinese—when they don’t know whether they’re talking about cholera or coffee beans or what have you—seems a bit of a stretch. And of course, the punchline of the thing is, that’s all a computer can do.

All a computer can do is manipulate ones and zeros and memory. It can just go book to book and look stuff up, but it doesn’t understand anything. And with no understanding, how can you have any AGI?

So, let me ask you this: How do you know that that’s not exactly what’s happening right now in my head? How do you know that me speaking English to you right now is not the exact process you described?

I don’t know, but the point of the setup is: If you are just that, then you don’t actually understand what we’re actually talking about. You’re just cleverly answering things, you know, it is all deterministic, but there’s, quote, “nobody home.” So, if that is the case, it doesn’t invalidate any of your answers, but it certainly limits what you’re able to do.

Well, you see, that’s a question that relates very much with consciousness. It relates to consciousness, and, “Are you aware of what you’re doing,” and things like that. And what is consciousness in the first place?

Let’s divide that up. Strictly speaking, consciousness is subjective experience. “I had an experience of doing X,” which is a completely different thing than “I have an intellectual understanding of X.” So, just the AGI part, the simple part of: does the man in the room understand what’s going on, or not?

Let’s be careful here. Because, what do you mean by “understand”? Because you can say that I’m playing chess against a computer. Do I understand the playing of chess better than a computer? I mean what do you mean by understand? Is it not understanding that the computer can play equal or better chess than me?

The computer does not understand chess in the meaningful sense that we have to get at. You know, one of the things we humans do very well is we generalize from experience, and we do that because we find things are similar to other things. We understand that, “Aha, this is similar to that,” and so forth. A computer doesn’t really understand how to play chess. It’s arguable that the computer is even playing chess, but putting that word aside, the computer does not understand it.

The computer, that program, is never going to figure out baccarat any more than it can figure out how many coffee beans Colombia should export next year. It just doesn’t have any awareness at all. It’s like a clock. You wind a clock, and tick-tock, tick-tock, it tells you the time. We progressively add additional gears to the clockwork again and again. And the thesis of what you seem to be saying is that, eventually, you add enough gears so that when you wind this thing up, it’s smarter than us and it can do absolutely anything we can do. I find that to be, at least, an unproven assumption, let alone perhaps a fantastic one.

I agree with you on the part that it’s unproven. And I agree with you that it may or may not be an issue. But it depends on what you’re going for here, and it depends on the computer you’re referring to, because we have the new software, AlphaGo, that was invented to play Go. And that actually learned to play the game based on the previous games—that’s to say, on the previous experience of other players. And then that same kind of approach of learning from the past, and coming up with new creative solutions for the future, was then implemented in a bunch of other fields, including bioengineering, including medicine, and so on.

So when you say the computer will never be able to calculate how many beans that country needs for next season, actually it can. That’s why it’s getting more and more generalized intelligence.

Well, let me ask that question a slightly different way. So I have, hypothetically, a cat food dish that measures out cat food for my cat. And it learns, based on the weight of the food in it, the right amount to put out. If the cat eats a lot, it puts more out. If the cat eats less, it puts less out. That is a learning algorithm, that is an artificial intelligence. It’s a learning one, and it’s really no different than AlphaGo, right? So what do you think happens from the cat dish—

—I would take issue with you saying it’s really no different from AlphaGo.

Hold on, let me finish the question; I’m eager to hear what you have to say. What happens, between the cat food AI and AlphaGo and an AGI? At what point does something different happen? Where does that break, and it’s not just a series of similar technologies?

So, let me answer your question this way… When you have a baby born, it’s totally dumb, stupid, blind, and deaf. It’s unable to differentiate between itself and its environment, and it lacks complete self-awareness for probably the first, arguably, year-and-a-half to two years. And there’s a number of psychological tests that can be administered as the child develops. Usually girls, by the way, do about three to six months better, or they develop personal awareness faster and earlier than boys, on average. But let’s say the average age is about a year-and-a-half to two years—and that’s a very crude estimation, by the way. The development of AI would not be exactly the same, but there will be parallels.

The question you’re raising is a very good question. I don’t have a good answer because, you know, that can only happen with direct observational data—which we don’t have right now to answer your question, right? So, let’s say tomorrow we develop artificial general intelligence. How would we know that? How can we test for that, right? We don’t know.

We’re not even sure how we can evaluate that, right? Because just as you suggested, it could be just a dumb algorithm, processing just like your algorithm is processing how much cat food to provide to your cat. It can lack complete self-awareness, while claiming that it has self-awareness. So, how do we check for that? The answer is, it’s very hard. Right now, we can’t. You don’t know that I even have self-awareness, right?

But, again, those are two different things, right? Self-awareness is one thing, but an AGI is easy to test for, right? You give a program a list of tasks that a human can do. You say, “Here’s what I want you to do. I want you to figure out the best way to make espresso. I want you to find the Waffle House…” I mean, it’s a series of tasks. There’s nothing subjective about it, it’s completely objective.

Yes.

So what has happened between the cat food example, to the AlphaGo, to the AGI—along that spectrum, what changed? Was there some emergent property? Was there something that happened? Because you said the AlphaGo is different than my cat food dish, but in a philosophical sense, how?

It’s different in the sense that it can learn. That’s the key difference.

So does my cat food thing, it gives the cat more food some days, and if the cat’s eating less, it cuts the cat food back.

Right, you’re talking just about cat food, but that’s what children do, too. Children know nothing when they come into this world, and slowly they start learning more and more. They start reacting better, and start improving, and eventually start self-identifying, and eventually they become conscious. Eventually they develop awareness of the things not only within themselves, but around themselves, etc. And that’s my point: it is a similar process; I don’t have the exact mechanism to break down for you.

I see. So, let me ask you a different question. Nobody knows how the brain works, right? We don’t even know how thoughts are encoded. We just use this ubiquitous term, “brain activity,” but we don’t know how… You know, when I ask you, “What was the color of your first bicycle?” and you can answer that immediately, even though you’ve probably never thought about it, nor do you have some part of your brain where you store first bicycles or something like that.

So, assume we don’t know that, and that therefore we don’t really know how it is that we happen to be intelligent. On what basis do you say, “Oh, we’re going to build a machine that can do something that we don’t even know how we do,” and even put a timeline on it, to say, “And it’s going to happen in twelve years”?

So there are a number of ways to answer your question. One is, we don’t necessarily need to know. We don’t know how we create intelligence when we have babies, too, but we do it. How did it happen? It happened through evolution; so, likewise, we have what are called “evolutionary algorithms,” which are basically algorithms that learn to learn. And the key point, as Dr. Stephen Wolfram proved years ago in his seminal work Mathematica, from very simple things, very complex patterns can emerge. Look at our universe; it emerged from tiny little, very simple things.

Actually I’m interviewing Lawrence Krauss next week, he says it emerged from nothing. So from nothing, you have the universe, which has everything, according to him at least. And we don’t know how we create intelligence in the baby’s case, we just do it. Just like you don’t know how you grow your nails, or you don’t know how you grow your hair, but you do it. So, likewise, just one of the many different paths that we can take to get to that level of intelligence is through evolutionary algorithms.
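[To make the “very complex patterns from very simple things” point concrete, here is a minimal illustrative sketch in Python, added to this transcript rather than taken from the conversation. It runs an elementary cellular automaton; the rule number (110), grid width, and step count are arbitrary choices for the demonstration.]

# Each cell's next state depends only on itself and its two neighbours,
# yet the overall pattern quickly becomes intricate and hard to predict.
def step(cells, rule=110):
    n = len(cells)
    return [
        (rule >> ((cells[(i - 1) % n] << 2) | (cells[i] << 1) | cells[(i + 1) % n])) & 1
        for i in range(n)
    ]

cells = [0] * 40 + [1] + [0] * 40   # start from a single "on" cell
for _ in range(30):
    print("".join("#" if c else "." for c in cells))
    cells = step(cells)

[Thirty rows of output are enough to show structure that the three-cell update rule never spells out explicitly.]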

By the way, this is what’s sometimes referred to as the black box problem, and AlphaGo is a bit of an example of that. There are certain things we know, and there are certain things we don’t know that are happening. Just like when I interviewed David Ferrucci, who was the team leader behind Watson, we were talking about, “How does Watson get this answer right and that answer wrong?” His answer is, “I don’t really know, exactly.” Because there are so many complicated things coming together to produce an answer, that after a certain level of complexity, it becomes very tricky to follow the causal chain of events.

So yes, it is possible to develop intelligence, and the best example for that is us. Unless you believe in that sort of first-mover, God-is-the-creator kind of thing, that somebody created us—you can say that we kind of came out of nothing. We evolved to have both consciousness and intelligence.

So likewise, why not have the same process, only on a different stratum? So, right now we’re biologically based; basically it’s DNA code replicating itself. We have A, C, T, and G. Alternatively, is it inconceivable that we can have this with a binary code? Or even if not binary, some other kind of mathematical code, so you can have intelligence evolve—be it silicon-based, be it photon-based, or even organic processor-based, be it quantum computer-based… what have you. Right?

So are you saying that there could be no other stratum, and no other way that could ever hold intelligence other than us? Then my question to you will be, well what’s the evidence of that claim? Because I would say that we have the evidence that it’s happened once. We could therefore presume that it could not be necessarily limited to only once. We’re not that special, you know. It could possibly happen again, and more than once.

Right, I mean it’s certainly a tenable hypothesis. The Singularitarians, for the most part, don’t treat it as a hypothesis; they treat it as a matter of faith.

That’s why I’m not such a good Singularitarian.

They say, “We have achieved consciousness. We have a general intelligence. Therefore, we must be able to build one.” You don’t generally apply that logic to anything else in life, right? There is a solar system, therefore we must be able to build one. There is a third dimension, therefore we must be able to build one.

With almost nothing else in life do you apply that logic, and yet for people who talk about the singularity, and are willing to put a date on it, by the way, there’s nothing up for debate. Even though how we achieved all the things that are required for it is completely unknown.

Let me give you Daniel Dennett’s take on things, for example. He says that consciousness doesn’t exist. That it is self-delusion. He actually makes a very, very good argument about it, per se. I’ve been trying to get him on my podcast for a while. But he says it’s total self-fabrication, self-delusion. It doesn’t exist. It’s beside the point, right?

But he doesn’t deny that we’re intelligent though. He just says that what we call “consciousness” is just brain activity. But he doesn’t say, “Oh, we don’t really have a general intelligence, either.” Obviously, we’re intelligent.

Exactly. But that’s kind of what you’re trying to imply with the machines, because they will be intelligent in the sense that they will be able to problem-solve anything that we’re able to problem-solve, as we pointed out—whether it’s chess, whether it’s cat food, whether it’s playing or composing the tenth symphony. That’s the point.

Okay, well that’s at least unquestionably the theory.

Sure.

So let’s go from there. Talk to me about Transhumanism. You write a lot about that. What do you think we’ll be able to do? And if you’re willing to say, when do you think we’ll be able to do it? And, I mean, a man with a pacemaker is a Transhuman, right? He can’t live without it.

I would say all of us are already cyborgs, depending on your definition. If you say that the cyborg is an organism consisting of, let’s say, organic and inorganic parts working together in a single unit, then I would answer that if you have been vaccinated, you’re already a cyborg.

If you’re wearing glasses, or contact lenses, you’re already a cyborg. If you’re wearing clothes and you can’t survive without them, or shoes, you’re already a cyborg, right? Because, let’s say for me, I am severely short-sighted with my eyesight. I’m like, -7.25 or something crazy like that. I’m almost kind of blind without my contacts. Almost nobody knows that, unless people listen to these interviews, because I wear contacts, and for all intents and purposes I am as eye-capable as anybody else. But take off my contacts and I’ll be blind. Therefore you have one single unit between me and that inorganic material, which basically I cannot survive without.

I mean, two hundred years ago, or five hundred years ago, I’d probably be dead by now, because I wouldn’t be able to get food. I wouldn’t be able to survive in the world with that kind of severe shortsightedness.

The same with vaccinations, by the way. We know that the vast majority of the population, at least in the developed world, has at least one, and in most cases a number of different vaccines—already by the time you’re two years old. Viruses, basically, are the carriers for the vaccines. And viruses straddle that line, that gray area between living and nonliving things—the hard-to-classify things. They become a part of you, basically. You carry those vaccine antibodies, in most cases, for the rest of your life. So I could say, according to that definition, we are all cyborgs already.

That’s splitting a hair in a very real sense though. It seems from your writing you think we’re going to be doing much more radical things than that; things which, as you said earlier, call into question whether or not we’re even human anymore. What are those things, and why does that affect our definition of “human”?

Let me give you another example. I don’t know if you or your audience have seen it in the news, but a couple of months ago the Chinese tried to modify human embryos with CRISPR gene-editing technology. It’s been almost 40 years since we had the first in vitro babies. At the time, basically what in vitro meant was that you do the fertilization outside of the womb, in a petri dish or something like that. And then you watch the division process begin, and then you select—by basically visual inspection—what looks to be the best-fertilized egg. And that’s the egg that you would implant.

Today, we don’t just observe; we can actually preselect. And not only that, we can go in and start changing things. So it’s just like when you’re first born: you start learning the alphabet, then you start reading full words, then full sentences, and then you start writing yourself.

We’re doing, currently, exactly that with genetics. We were starting to just identify the letters of the alphabet thirty, or forty, or fifty years ago. Then we started reading slowly; we read the human genome about fifteen years ago. And now we’re slowly starting to learn to write. And so the implication of that is this: how does the meaning of what it means to be human change, when you can change your sex, color, race, age, and physical attributes?

Because that’s the bottom line. When we can go and make changes at the DNA level of an organism, you can change all those parameters. It’s just like programming. In computer science it’s 0 and 1. In genetics it’s ATCG, four letters, but it’s the same principle. In one case, you’re programming a software program for a computer; in the other case, you’re programming living organisms.
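[A small illustration of that “same principle” analogy, added to this transcript rather than taken from the conversation. Each of the four genetic letters carries exactly two bits, so any DNA string can be rewritten in binary and back; the particular letter-to-bits mapping below is an arbitrary choice.]

# Arbitrary 2-bit assignment; any fixed mapping of the four bases would do.
TO_BITS = {"A": "00", "C": "01", "G": "10", "T": "11"}
FROM_BITS = {bits: base for base, bits in TO_BITS.items()}

def dna_to_binary(seq):
    return "".join(TO_BITS[base] for base in seq)

def binary_to_dna(bits):
    return "".join(FROM_BITS[bits[i:i + 2]] for i in range(0, len(bits), 2))

print(dna_to_binary("GATTACA"))         # 10001111000100
print(binary_to_dna("10001111000100"))  # GATTACA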

But in that example, though, everybody—no matter what race you are—you’re still a human; no matter what gender you are, you’re still a human.

It depends how you qualify “human,” right? Let’s be more specific. So right now, when you say “humans,” what you mean actually is Homo sapiens, right? But Homo sapiens has a number of very specific physical attributes. When you start changing the DNA structure, you can actually change those attributes to the point where the result doesn’t carry those physical attributes anymore. So are you then Homo sapiens anymore?

From a biological point of view, the answer will most likely depend on how far you’ve gone. There’s no breakpoint, though, and different people will have a different red line to cross. You know for some, just a bit. So let’s say you and your wife or partner want to have a baby. And both of you happen to be carriers of a certain kind of genetic disease that you want to avoid. You want to make sure, before you conceive that baby, the fertilized egg doesn’t carry that genetic material.

And that’s all you care about, that’s fine. But someone else will say, that’s your red line, whereas my red line is that I want to give that baby the good looks of Brad Pitt, I want to give it the brain of Stephen Hawking, and I want to give it the strength of a weightlifter, for example. Each person who is making that choice would go for different things, and would have different attributes that they would choose to accept or not to accept.

Therefore, you would start having that diversification that I talked about in the beginning. And that’s even before you start bringing in things like neural cognitive implants, etc.—which would basically be the merger between man and machine, right? Which means that you can have two parallel developments: on the one hand, biotech and genetics, our biological evolution and development accelerated; and on the other hand, the merger of that with the acceleration, evolution, and improvement of computer technology and neurotech. When you put those two things together, you end up with a final entity which is nothing like what we are today, and it definitely would not fit the definition of being human.

Do you worry, at some level, that it’s taken us five thousand years of human civilization to come up with this idea that there are things called human rights? That there are these things you don’t do to a person no matter what. That you’re born with them, and because you are human, you have these rights.

Do you worry that, for better or worse, what you’re talking about will erode that? That we will lose this sense of human rights, because we lose some demarcation of what a human is?

That’s a very complicated question. I would suggest people read Yuval Harari’s book Homo Deus on that topic, and the previous one, called Sapiens. Those two are probably the two best books that I’ve read in the last ten years. But basically, the idea of human rights is an idea that was born just a couple hundred years ago. It came to exist with humanism, and especially liberal humanism. Right now, if you see how it’s playing out, humanism is kind of taking over what religion used to do, in the sense that religion used to put God at the center of everything—and then, since we were his creation, everything else was created for us, to serve us.

For example the animal world, etc., and we used to have the Ptolemaic idea of the universe, where the earth was the center, and all of those things. Now, what humanism is doing is putting the human in the center of the universe, and saying humanity has this primacy above everything else, just because of our very nature. Just because you are human, you have human rights.

I would say that’s an interesting story, but if we care about that story we need to push it even further.

In our present context, how is that working out for everyone else other than humanity? Well, the moment we created humanism and invented human rights, we basically made humanity divine. We took the divinity from God and gave it to humanity, but we downgraded everybody else. Back in the day—let’s say in hunter-gatherer society—we considered ourselves to be equal to and on par with the animals.

Because you see, one day I would kill you and eat you; the next day maybe a tiger would eat me. That’s how the world was. But now we have downgraded all the animals to machines—they don’t have consciousness, they don’t have any feelings, they lack self-awareness—and therefore we can enslave and kill them any way we wish.

So as a result, we pride ourselves on our human rights and things like that, and yet we enslave and kill seventy to seventy-five billion animals every year, and 1.3 trillion sea organisms like fish, annually. So the question then is, if we care so much about rights, why should they be limited only to human rights? Are we saying that other living organisms are incapable of suffering? I’m a dog owner; I have a seventeen-and-a-half-year-old dog. She’s on her last legs. She actually had a stroke last weekend.

I can tell you that she has taught me that she possesses the full spectrum of happiness and suffering that I do, pretty much. Even things like jealousy, and so on, she demonstrated to me multiple times, right? Yet, we today use that idea of humanism and human rights to defend ourselves and enslave everybody else.

I would suggest it’s time to expand that. First, to our fellow animals: we need to include them and recognize that they have their own rights. Second, rights should possibly not be limited to organic organisms, and should not be called human or animal rights; they should be called intelligence rights, or even go beyond intelligence—to any kind of organism that can exhibit things like suffering and happiness and pleasure and pain.

Because obviously, there is a different level of intelligence between me and my dog—we would hope—but she’s able to suffer as much as I am, and I’ve seen it. And that’s true especially more for whales and great apes and stuff like that, which we have brought to the brink of extinction right now. We want to be special, that’s what religion does to us. That’s what humanism did with human rights.

Religion taught us that we’re special because God created us in his own image. Then humanism said there is no God, we are the God, so we took the place of God—we took his throne and said, “We’re above everybody else.” That’s a good story, but it’s nothing more than a story. It’s a myth.

You’re a vegan, correct?

Yes.

How far down would you extend these rights? I mean, you have consciousness, and then below that you have sentience, which is of course a misused word. People use “sentience” to mean intelligence, but sentience is the ability to feel something. In your world, you would extend rights at some level all the way down to anything that can feel?

Yeah, and look: I’ve been a vegan for just over a year and a couple of months, let’s say fourteen months. So, just like any other human being, I have been, and still am, very imperfect. Now, I don’t know exactly how far we should expand that, but I would say we should stop immediately at the level we can easily observe that we’re causing suffering.

If you go to a butcher shop, especially an industrialized farming butcher shop, where they kill something like ten thousand animals per day—it’s so mechanized, right? If you see that stuff in front of your eyes, it’s impossible not to admit that those animals are suffering, to me. So that’s at least the first step. I don’t know how far we should go, but we should start at the first steps, which are very visible.

What do you think about consciousness? Do you believe consciousness exists, unlike Dan Dennett, and if so where do you think it comes from?

Now you’re putting me on the spot. I have no idea where it comes from, first of all. You know, I am atheist, but if there’s one religion that I have very strong sympathies towards, that would be Buddhism. I particularly value the practice of meditation. So the question is, when I meditate—and it only happens rarely that I can get into some kind of deep meditation—is that consciousness mine, or am I part of it?

I don’t know. So I have no idea where it comes from. I think there is something like consciousness. I don’t know how it works, and I honestly don’t know if we’re part of it, or if it is a part of us.

Is it at least a tenable hypothesis that a machine would need to be conscious, to be an AGI?

I would say yes, of course, but the next step, immediately, is how do we know if that machine has consciousness or not? That’s what I’m struggling with, because one of the implications is that the moment you accept, or commit to, that kind of definition, that we’re only going to have AGI if it has consciousness, then the question is, how do we know if and when it has consciousness? An AGI that’s programmed to say, “I have consciousness,” well, how do you know if it’s telling the truth, and if it’s really conscious or not? So that’s what I’m struggling with, to be more precise in my answers.

And mind you, I have the luxury of being a philosopher, and that’s also kind of the negative too—I’m not an engineer, or a neuroscientist, so…

But you can say consciousness is required for an AGI, without having to worry about, well how do we measure it, or not.

Yes.

That’s a completely different thing. And if consciousness is required for an AGI, and we don’t know where human consciousness comes from, that at least should give us an enormous amount of pause when we start talking about the month and the day when we’re going to hit the singularity.

Right, and I agree with you entirely, which is why I’m not so crazy about the timelines, and I’m staying away from it. And I’m generally on the skeptical end of things. By the way, for the last seven years of my journey I have been becoming more and more skeptical. Because there are other reasons or ways that the singularity…

First of all, the future never unfolds the way we think it will, in my opinion. There’s always those black swan events that change everything. And there are issues when you extrapolate, which is why I always stay away from extrapolation. Let me give you two examples.

The easy example is negative extrapolation. We have people such as Lord Kelvin—he was the president of the British Royal Society, one of the smartest people—who wrote a book in the 1890s about how heavier-than-air aircraft are impossible to build.

The great H.G. Wells wrote, just in 1902, that heavier-than-air aircraft are totally impossible to build, and he’s a science fiction writer. And yet, a year later the Wright brothers, two bicycle makers, who probably never read Lord Kelvin’s book, and maybe didn’t even read any of H.G. Wells’ science fiction novels, proved them both wrong.

So people were extrapolating negatively from the past. Saying, “Look, we’ve tried to fly since the time of Icarus, and the myth of Icarus is a warning to us all: we’re never going to be able to fly.” But we did fly. So we didn’t fly for thousands of years, until one day we flew. That’s one kind of extrapolation that went wrong, and that’s the easy one to see.

The harder one is the opposite, which is called positive extrapolation. From 1903 to, let’s say, the late 1960s, we went from the Wright brothers to the moon. People—amazing people, like Arthur C. Clarke—said, well, if we made it from the Wright brothers in 1903 to the moon by the late 1960s, then by 2002 we will be beyond Mars; we will be outside of our solar system.

That’s positive extrapolation. Based on very good data for, let’s say, sixty-five years from 1903 to 1968—very good data—you saw tremendous progress in aerospace technology. We went to the moon several times, in fact, and so on and so on. So it was logical to extrapolate that we would be by Mars and beyond, today. But actually, the opposite happened. Not only did we not reach Mars by today, we are actually unable to get back to the moon, even. As Peter Thiel says in his book, we were promised flying cars and jetpacks, but all we got was 140 characters.

In other words, beware of extrapolations, because they’re true until they’re not true. You don’t know when they are going to stop being true, and that’s the nature of black swan sorts of things. That’s the nature of the future. To me, it’s inherently unknowable. It’s always good to have extrapolations, and to have ideas, and to have a diversity of scenarios, right?

That’s another thing which I agree with you on: Singularitarians tend to embrace a single view of the future, or a single path to the future. I have a problem with that myself. I think that there’s a cone of possible futures. There are certainly limitations, but there is a cone of possibilities, and we are aware of only a fraction of it. We can extrapolate only in a fraction of it, because we have unknown unknowns, and we have black swan phenomena, which can change everything dramatically. I’ve even listed three disaster scenarios—like asteroids, ecological collapse, or nuclear weapons—which can also change things dramatically. There are many things that we don’t know, that we can’t control, and that we’re not even aware of, which can and probably will change the actual future from the future we think will happen today.

Last philosophical question, and then I’d like to chat about what you’re working on. Do you believe humans have free will?

Yes. So I am a philosopher, and again—just like with the future—there are limitations, right? All the possible futures stem from the cone of future possibilities derived from our present. Likewise, our ability to choose, to make decisions, to take action, has very strict limitations; yet there is a realm of possibilities that’s entirely up to us. At least that’s what I’m inclined to think, even though most scientists that I meet and interview on my podcast are actually, to one degree or another, determinists.

Would an AGI need to have free will in order to exist?

Yes, of course.

Where do you think human free will comes from? If every effect had a cause, and every decision had a cause—presumably in the brain—whether it’s electrical or chemical or what have you… Where do you think it comes from?

Yeah, it could come from quantum mechanics, for example.

That only gets you randomness. That doesn’t get you somehow escaping the laws of physics, does it?

Yes, but randomness can be sort of a living-cat and dead-cat outcome, at least metaphorically speaking. You don’t know which one it will be until that moment is there. The other thing is, let’s say, you have fluid dynamics; with the laws of physics, we can predict how a particular system of gas will behave within the laws of fluid dynamics, but it’s impossible to predict how a single molecule or atom will behave within that system. In other words, if the laws of the universe and the laws of physics set the realm of possibilities, then within that realm you can still have free will. We are such tiny, minuscule little parts of the system, as individuals, that we are more akin to atoms, if not smaller particles than that.

Therefore, we can still be unpredictable.

Just like it’s unpredictable, by the way, with quantum mechanics, to say, “Where is the electron located?” If you try to observe it, then you are already affecting the outcome. You’re predetermining it, actually, when you try to observe it, because you become a part of the system. But if you’re not observing it, you can describe a realm of possibilities where it’s likely to be, but you don’t know exactly where it is. Within that realm, you get your free will.

Final question: Tell us what you’re working on, what’s exciting to you, what you’re reading about… I see you write a lot about movies. Are there any science fiction movies that you think are good ones to inform people on this topic? Just talk about that for a moment.

Right. So, let me answer backwards. In terms of movies—it’s been a while since I’ve watched it, but I actually even wrote a review of it—one of the movies that I really enjoyed watching is by the Wachowskis, and it’s called “Cloud Atlas.” I don’t think that movie was very successful at all, to be honest with you.

I’m not even sure if they managed to recover the money they invested in it, but in my opinion it was one of the ten best movies I’ve ever seen in my life. Because it’s a sextet—six plots progressing in parallel, in six different locations and six different epochs, with tremendous actors. And it touched on a lot of those future technologies, and even the meaning of being human—what separates us from the others, and so on.

I would suggest people check out “Cloud Atlas.” One of my favorite movies. The previous question you asked was, what am I working on?

Mm-hmm.

Well, to be honest, I just finished my first book three months ago or something. I launched it on January 23rd I think. So I’ve been basically promoting my book, traveling, giving speeches, trying to raise awareness about the issues, and the fact that, in my view, we are very unprepared—as a civilization, as a society, as individuals, as businesses, and as governments.

We are going to witness a tremendous amount of change in the next several decades, and I think we’re grossly unprepared. And I think, depending on how we handle those changes, with genetics, with robotics, with nanotech, with artificial intelligence—even if we never reach the level of artificial general intelligence, by the way, that’s beside the point to me—just the changes we’re going to witness as a result of the biotech revolution can actually put our whole civilization at risk. They’re not only going to change the meaning of what it is to be human; they would put everything at risk. All of those things converging together, in the narrow span of several decades, basically, I think, create this crunch point, which could be what some people have called a “pre-singularity future,” which is one possible answer to the Fermi Paradox.

Enrico Fermi was this very famous Italian physicist who, a few decades ago, basically observed that there are two-hundred billion galaxies just in the observable realm of the universe. And each of those two-hundred billion galaxies has two-hundred billion stars. In other words, there’s an almost endless number of planets like ours—located in the Goldilocks area, where it’s not too hot or too cold—which can potentially give birth to life. The question then is, if there are so many planets and so many stars and so many places where we can have life, where is everybody? Where are all the aliens? There’s a diversity of answers to that question. But at least one of those possible scenarios, to explain this paradox, is what’s referred to as the pre-singularity future. Which is to say, in each civilization, there comes a moment where its technological prowess surpasses its capacity to control it. Then, possibly, it self-destructs.
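[For the arithmetic behind that estimate, using the figures as stated: roughly 200 billion galaxies times roughly 200 billion stars per galaxy is on the order of 2 × 10^11 × 2 × 10^11 = 4 × 10^22 stars in the observable universe, so even if only a tiny fraction of their planets sit in the Goldilocks zone and develop life, the expected number of living worlds would still be enormous.]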

So in other words, what I’m saying is that it may be an occurrence which happens on a regular basis in the universe. It’s one way to explain the Fermi Paradox, and it’s possibly the moment that we’re approaching right now. So it may be a moment where we go extinct like dinosaurs; or, if we actually get it right—which right now, to be honest with you, I’m getting kind of concerned about—then we can actually populate the universe. We can spread throughout the universe, and as Konstantin Tsiolkovsky said, “Earth is the cradle of humanity, but sooner or later, we have to leave the cradle.” So, hopefully, in this century we’ll be able to leave the cradle.

But right now, we are not prepared—neither intellectually, nor technologically, nor philosophically, nor ethically, not in any way possible, I think. That’s why it’s so important to get it right.

The name of your book is?

Conversations with the Future: 21 Visions for the 21st Century.

All right, Nikola, it’s been fascinating. I’ve really enjoyed our conversation, and I thank you so much for taking the time.

My pleasure, Byron.

Byron explores issues around artificial intelligence and conscious computers in his upcoming book The Fourth Age, to be published in April by Atria, an imprint of Simon & Schuster. Pre-order a copy here.


Voices in AI – Episode 20: A Conversation with Marie des Jardins


In this episode, Byron and Marie talk about the Turing test, Watson, autonomous vehicles, and language processing.





Byron Reese: This is Voices in AI, brought to you by Gigaom. I’m Byron Reese. Today I’m excited that our guest is Marie des Jardins. She is an Associate Dean for Engineering and Information Technology as well as a professor of Computer Science at the University of Maryland, Baltimore County. She got her undergrad degree from Harvard, and a Ph.D. in computer science from Berkeley, and she’s been involved in the National Conference of the Association for the Advancement of Artificial Intelligence for over 12 years. Welcome to the show, Marie.

Marie des Jardins: Hi, it’s nice to be here.

I often open the show with “What is artificial intelligence?” because, interestingly, there’s no consensus definition of it, and I get a different kind of view of it from everybody. So I’ll start with that. What is artificial intelligence?

Sure. I’ve always thought about artificial intelligence as just a very broad term referring to trying to get computers to do things that we would consider intelligent if people did them. What’s interesting about that definition is it’s a moving target, because we change our opinions over time about what’s intelligent. As computers get better at doing things, they no longer seem that intelligent to us.

We use the word “intelligent,” too, and I’m not going to dwell on definitions, but what do you think intelligence is at its core?

So, it’s definitely hard to pin down, but I think of it as activities that human beings carry out, that we don’t know of lower order animals doing, other than some of the higher primates who can do things that seem intelligent to us. So intelligence involves intentionality, which means setting goals and making active plans to carry them out, and it involves learning over time and being able to react to situations differently based on experiences and knowledge that we’ve gained over time. The third part, I would argue, is that intelligence includes communication, so the ability to communicate with other beings, other intelligent agents, about your activities and goals.

Well, that’s really useful and specific. Let’s look at some of those things in detail a little bit. You mentioned intentionality. Do you think that intentionality is driven by consciousness? I mean, can you have intentionality without consciousness? Is consciousness therefore a requisite for intelligence?

I think that’s a really interesting question. I would decline to answer it mainly because I don’t think we ever can really know what consciousness is. We all have a sense of being conscious inside our own brains—at least I believe that. But of course, I’m only able to say anything meaningful about my own sense of consciousness. We just don’t have any way to measure consciousness or even really define what it is. So, there does seem to be this idea of self-awareness that we see in various kinds of animals—including humans—and that seems to be a precursor to what we call consciousness. But I think it’s awfully hard to define that term, and so I would be hesitant to put that as a prerequisite on intentionality.

Well, I think people agree what it is in a sense. Consciousness is the experience of things. It’s having a subjective experience of something. Isn’t the debate more like where does that come from? How does that arise? Why do we have it? But in terms of the simple definition, we do know that, don’t we?

Well, I don’t know. I mean, where does it come from, how does it arise, and do different people even have the same experience of consciousness as each other? I think when you start to dig down into it, we don’t have any way to tell whether another being is conscious or self-aware other than to ask them.

Let’s look at that for a minute, because self-awareness is a little different. Are you familiar with the mirror test that Professor Gallup developed, where they take a sleeping animal, paint a little red spot on its forehead, and then wait until it walks by a mirror, and if it stops and rubs its own forehead, then, according to the theory, it has a sense of self and therefore it is self-aware? And the only reason all of this matters is that if you really want to build an intelligent machine, you have to start with what goes into that. So do you think that is a measure of self-awareness, and would a computer need to pass the mirror test, as it were?

That’s where I think we start to run into problems, right? Because it’s an interesting experiment, and it maybe tells us something about, let’s say, a type of self-awareness. If an animal’s blind, it can’t pass that test. So, passing the test therefore can’t be a precursor to intelligence.

Well, I guess the question would be if you had the cognitive ability and a fully functional set of senses that most of your species have, are you able to look at something else and determine that, “I am a ‘me’” and “That’s a reflection of me,” and “That actually is me, but I can touch my own forehead.”

I’m thinking, sorry. I’m being nonresponsive because I’m thinking about it, and I guess what I’m trying to say is that a test that’s designed for animals that have evolved in the wild is not necessarily a meaningful test for intelligent agents that we’ve engineered, because I could design a robot that can pass that test, that nobody would think was self-aware in any interesting and meaningful sense. In other words, for any given test you design, I can game and redesign my system to pass that test. But the problem is that the test measures something that we think is true in the wild, but as soon as we say, “This is the test,” we can build the thing that passes that test that doesn’t do what we meant for the agent to be able to do, to be self-aware.

Right. And it should be pointed out that there are those who look at the mirror test and say, “Well, if you put a spot on an animal’s hand, and they just kind of wipe their hand…” That it’s really more a test of whether they have the mental capability to understand what a mirror does, and it has nothing to do with…

Right. Exactly. It’s measuring something about the mirror and so forth.

Let’s talk about another thing in your intelligence definition, because I’m fascinated by what you just kind of outlined. You said that some amount of communication, therefore some language, is necessary. So do you think—at least before we get to applying it to machines—that language is a requisite in the animal kingdom for intelligence?

Well, I don’t think it has to be language in the sense of the English language or our human natural language, but there are different ways to communicate. You can communicate through gestures. You can communicate through physical interaction. So it doesn’t necessarily have to be spoken language, but I do think the ability to convey information to another being that can then receive the information that was conveyed is part of what we mean by intelligence. Languages for artificial systems could be very limited and constrained, so I don’t think that we necessarily have to solve the natural language problem in order to develop what we would call intelligent systems. But I think when you talk about strong AI, which is referring to sort of human level intelligence, at that point, I don’t think you can really demonstrate human level intelligence without being able to communicate in some kind of natural language.

So, just to be clear, are you saying language indicates intelligence or language is required for intelligence?

Language is required for intelligence.

There are actually a number of examples in the plant kingdom where the plants are able to communicate signals to other plants. Would you say that qualifies? If you’re familiar with any of those examples, do those qualify as language in a meaningful sense, or is that just like, “Well, you can call it language if you’re trying to do clever thought riddles, but it’s not really a language.”

Yeah, I guess I’d say, as with most interesting things, there’s sort of a spectrum. But one of the characteristics of intelligent language, I think, is the ability to learn the language and to adapt the language to new situations. So, you know, ants communicate with each other by laying down pheromones, but ants can’t develop new ways to communicate with each other. If you put them into a new environment, they’re still biologically hardwired to communicate in that same way.

There’s an interesting philosophical argument that the species is intelligent, or evolution is intelligent at some level. I think those are interesting philosophical discussions. I don’t know that they’re particularly helpful in understanding intelligence in individual beings.

Well, I definitely want to get to computers here in a minute and apply all of this as best we can, but… By our best guess, humans acquired speech a hundred thousand years ago, roughly the same time we got fire. The theory is that fire allowed us to cook food, which allowed us to break down the proteins in it and make it more digestible, and that that allowed us to increase our caloric consumption, and we went all in on the brain, and that gave us language. Would your statement that language is a requirement for intelligence imply that a hundred and one thousand years ago, we were not intelligent?

I would guess that human beings were communicating with each other a hundred and one thousand years ago and probably two hundred thousand years ago. And again, I think intelligence is a spectrum. I think chimpanzees are intelligent and dolphins are intelligent, at some level. I don’t know about pigs and dogs. I don’t have strong evidence.

Interestingly, of all things, dogs don’t pass the red paint mirror test. Yet they are the only animal on the whole face of the earth—and by all means, any listener out there who knows otherwise, please email me—that, if you point at an object, will look at the object.

Really?

Yeah, even chimpanzees don’t do it. So it’s thought that they co-evolved with us as we domesticated them. That was something we selected for, not overtly but passively, because that’s useful. It’s like, “Go get that thing,” and then the dog looks over there at it.

Right.

It’s funny, there’s an old Far Side cartoon—you can’t get those things out of your head—where the dolphins are in the tank, and the scientists are writing down all the dolphins’ noises, and the dolphins are saying things like, “Se habla español,” and “Sprechen Sie Deutsch,” and the scientists are like, “Yeah, we can’t make any sense of it.”

So let’s get back to language, because I’m really fascinated by this and particularly the cognitive aspects of it. So, what do you think is meaningful, if anything, about the Turing test—which of course you know, but for the benefit of our listeners, is: Alan Turing put this out that if you’re on a computer terminal, and you’re chatting with somebody, typing, and you can’t tell if it’s a person or a machine, then you have to say that machine is intelligent.

Right, and of course, Alan Turing’s original version of that test was a little bit different and more gendered, if you’re familiar with it.

He based it on the gendered test, right. You’re entirely right. Yes.

There’s a lot of objections to the Turing test. In fact, when I teach the Introductory AI class at UMBC, I have the students read some of Alan Turing’s work and then John Searle’s arguments against the Turing test.

Chinese Room, right?

The Chinese Room and so forth, and I have them talk about all of that. And, again, I think these are, sort of, interesting philosophical discussions that, luckily, we don’t actually need to resolve in order to keep making progress towards intelligence, because I don’t think this is one that will ever be resolved.

Here’s something I think is really interesting: when that test was proposed, and in the early years of AI, the way it was envisioned was based on the communication of the time. Today’s Turing tests are based in an environment in which we communicate very differently—we communicate very differently online than we do in person—than Alan Turing ever imagined we would. And so the kind of chat bots that do well at these Turing tests really probably wouldn’t have looked intelligent to an AI researcher in the 1960s, but I don’t think that most social media posts would have looked very intelligent, either. And so we’ve kind of adapted ourselves to this sort of cryptic, darting, illogical, jumping-around-in-different-topics way of conversing with each other online, where lapses in rationality and continuity are forgiven really easily. And when I see some of the transcripts of modern Turing tests, I think, well, this kind of reminds me a little bit of PARRY. I don’t know if you’re familiar with ELIZA and PARRY.

Weizenbaum’s 1960s Q&A, his kind of psychologist helper, right?

Right. So ELIZA was a pattern-recognition-based online psychologist that would use this, I guess, Freudian way of interrogating a patient, to ask them about their feelings and so forth. And when this was created, people were very taken in by it, because, you know, they would spill out their deepest, darkest secrets to what turned out to be, essentially, one of the earliest chat bots. There was a version of that that was created later. I can’t remember the researcher who created it, but it was studying paranoid schizophrenia and the speech patterns of paranoid schizophrenics, and that version of ELIZA was called PARRY.

If you read any transcripts by PARRY, it’s very disjointed, and it can get away with not having a deep semantic model, because it doesn’t really understand anything, and if it can’t match anything, it just changes the topic. And that’s what the modern Turing tests look like to me, mostly. If we were going to really use the Turing test as some measure of intelligence, I think maybe we need to put some rules on critical thinking and rationality. What is it that we’re chatting about? And what is the nature of this communication with the agent in the black box? Because, right now, it’s just degenerated into, again, this kind of gaming the system. Well, let’s just see if we can trick a human into thinking that we’re a person, but we get to take advantage of the fact that online communication is this kind of dance that we play that’s not necessarily logical and rational and rule-following.
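To make that pattern-matching idea concrete, here is a minimal sketch of an ELIZA-style responder in Python. The rules, canned replies, and fallback behavior are invented for illustration; they are not the actual scripts used by ELIZA or PARRY, only the general technique described in the conversation: match a surface pattern if you can, and change the subject if you can’t.

import random
import re

# A few hand-written pattern-to-template rules (illustrative only).
RULES = [
    (re.compile(r"\bI feel (.+)", re.I), "Why do you feel {0}?"),
    (re.compile(r"\bmy (mother|father)\b", re.I), "Tell me more about your {0}."),
    (re.compile(r"\bI am (.+)", re.I), "How long have you been {0}?"),
]

# When nothing matches, just change the subject, with no semantic model at all.
TOPIC_CHANGERS = [
    "Let's talk about something else.",
    "Why do you say that?",
    "I would rather not discuss that right now.",
]

def respond(utterance):
    # Return the first template whose pattern matches; otherwise dodge the question.
    for pattern, template in RULES:
        match = pattern.search(utterance)
        if match:
            return template.format(*match.groups())
    return random.choice(TOPIC_CHANGERS)

print(respond("I feel anxious about work"))       # Why do you feel anxious about work?
print(respond("What is the capital of France?"))  # falls back to a topic change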

I want to come back to that, because I want to go down that path with you, but beforehand, it should be pointed out, and correct me if I’m wrong because you know this a lot better than I do, but the people who interacted with ELIZA all knew it was a computer and that there was “nobody at home.” And that, in the end, is what freaked Weizenbaum out, and had him turn on artificial intelligence, because I think he said something to the effect that when the computer says, “I understand,” it’s a lie. It’s a lie because there is no “I,” and there’s nothing to understand. Was that the same case with PARRY that they knew full and well they were talking to a machine, but they still engaged with it as if it was another person?

Well, that was being used to try to model the behavior of a paranoid schizophrenic, and so my understanding is that they ran some experiments where they had psychologists, in a blind setting, interact with an actual paranoid schizophrenic or this model, and do a Turing test to try to determine whether this was a convincing model of paranoid schizophrenic interaction style. I think it was a scientific experiment that was being run.

So, you used the phrase, when you were talking about PARRY just now, “It doesn’t understand anything.” That’s obviously Searle’s whole question with the Chinese Room, that the non-Chinese speaker who can use these books to answer questions in Chinese doesn’t understand anything. Do you think even today a computer understands anything, and will a computer ever understand anything?

That’s an interesting question. So when we talk about this with my class, with my students, I use the analogy of learning a new language. I don’t know if you speak any foreign languages to any degree of fluency.

I’m still working on English.

Right. So, I speak a little bit of French and a little bit of German and a little bit of Italian, so I’m very conscious of the language learning process. When I was first learning Italian, anything I said in Italian was laboriously translated in my mind by essentially looking up rules. I don’t remember any Italian, so I can’t use Italian as an example anymore. I want to say, “I am twenty years old” in French, and so in order to do that, I don’t just say, “J’ai vingt ans”; I say to myself, “How do I say, ‘I am 20 years old’? Oh, I remember, they don’t say, ‘I am 20 years old.’ They say, ‘I have 20 years.’ OK. ‘I have’ is ‘J’ai,’ ‘twenty’ is ‘vingt’…” And I’m doing this kind of pattern-based lookup in my mind. But doing that inside my head, I can communicate a little bit in French. So do I understand French?

Well, the answer to that question would be “no,” but what you understand is that process you just talked about, “OK, I need to deconstruct the sentence. I need to figure out what the subject is. I need to line that up with the verb.” So yes, you have a higher order understanding that allows you to do that. You understand what you’re doing, unquestionably.

Right.

And so the question is, at that meta-meta-meta-meta-meta level, will a computer ever understand what it’s doing?

And I think this actually kind of gets back to the question of consciousness. Is understanding—in the sense that Searle wants it to be, or Weizenbaum wanted it to be—tied up in our self-awareness of the processes that we’re carrying out, to reason about things in the world?

So, I only have one more Turing test question to ask, then I would love to change the subject to the state of the art today, and then I would love to talk about when you think we’re going to have certain advances, and then maybe we can talk about the impact of all this technology on jobs. So, with that looking forward, one last question, which is: when you were talking about maybe rethinking the Turing test, that we would have a different standard, maybe, today than Turing did. And by the way, the contests that they have where they really are trying to pass it, they are highly restricted and constrained, I think. Is that the case?

I am not that familiar with them, although I did read The Most Human Human, which is a very interesting book if you are looking for some light summer reading.

All right.

Are you familiar with the book? It’s by somebody who served as a human in the Loebner Prize Turing test, and it’s sort of about his experience of what it’s like to be the human.

No. I don’t know that. That’s funny. So, the interesting thing was that—and anybody who’s heard the show before will know I use this example—I always start everyone with the same question. I ask the same question of every system, and nobody ever gets it right, even close. And because of that, I know within three seconds that I’m not talking to a human. And the question is: “What’s larger? The sun or a nickel?” And no matter how, to use your phrase, “schizophrenic” or “disjointed” the person is, they answer, “The sun,” or “Duh,” or “Hmm.” But no machine can answer it.

So, two questions: Is that question indicative of the state of the art, that we really are like in stone knives and bear skins with natural language? And second, do you think that we’re going to make strides forward that maybe someday you’ll have to wonder if I’m actually not a sophisticated artificial intelligence chatting with you or not?

Actually, I guess I’m surprised to hear you say that computers can’t answer that question, because I would think Watson, or a system like that, that has a big backend knowledge base that it’s drawing on would pretty easily be able to find that. I can Google “How big is the sun?” and “How big is a nickel?” and apply a pretty simple rule.
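As a rough illustration of the lookup-and-compare approach described above, here is a minimal sketch in Python, assuming a tiny hand-built fact table standing in for a large backend knowledge base; the table, the function name, and the exact figures are illustrative, not how Watson or Google actually answer such questions.

# Tiny stand-in "knowledge base": object name mapped to approximate diameter in meters.
SIZES_IN_METERS = {
    "the sun": 1.39e9,     # the Sun is about 1.39 million kilometers across
    "a nickel": 0.02121,   # a US nickel is about 21.21 mm across
}

def which_is_larger(a, b):
    # The "pretty simple rule": look both facts up and compare them.
    return a if SIZES_IN_METERS[a] > SIZES_IN_METERS[b] else b

print(which_is_larger("the sun", "a nickel"))  # prints: the sun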

Well, you’re right. In all fairness, there’s not a global chat bot of Watson that I have found. I mean, the trick is that a nickel is both a metal and a coin, and the sun is a homophone that could be a person’s son. But a person, a human, makes that connection. These are both round and so they kind of look alike and whatnot. When I say it, what I mean is you go to Cleverbot, or you go to the different chat bots that are entered in the Turing competitions and whatnot. If you ask Google, if you type that question into Google, you don’t get the answer. So, you’re right, there are probably systems that can nail it. I just never bump into them.

And, you know, there’s probably context that you could provide in which the answer to that question would be the nickel. Right? So like I’ve got a drawing that we’ve just been talking about, and it’s got the sun in it, and it has a nickel in it, and the nickel is really big in the picture, and the sun is really small because it’s far away. And I say, “Which is bigger?” There might actually be a context in which the obvious answer isn’t actually the right answer, and I think that kind of trickiness is what makes people, you know, that’s the signal of intelligence, that we can kind of contextualize our reasoning. I think the question as a basic question, it’s such a factual question, that that’s the kind of thing that I think computers are actually really good at. What do you love more: A rose or a daisy? That’s a harder question.

Right.

You know, or what’s your mother’s favorite flower? Now there’s a tricky question.

Right. I have a book coming out on this topic at the end of the year, and I try to think up the hardest question, like what’s the last one. I’m sure listeners will have better ideas than I have. But one I came up with was: Dr. Smith is eating at her favorite restaurant when she receives a phone call. She rushes out, neglecting to pay her bill. Is management likely to prosecute? So we need to know: She’s probably a medical doctor. She probably got an emergency call. It’s her favorite restaurant, so she’s probably known there. She dashes out. Are they really going to go to all the effort to prosecute, not just get her to pay next time she’s in and whatnot? That is the kind of thing that has so many layers of experience that it would be hard for a machine to do.

Yeah, but I would argue that I think, eventually, we will have intelligent agents that are embedded in the world and interact with people and build up knowledge bases of that kind of common sense knowledge, and could answer that question. Or a similar type of question that was posed based on experience in the world and knowledge of interpersonal interactions. To me, that’s kind of the exciting future of AI. Being able to look up facts really fast, like Watson… Watson was exciting because it won Jeopardy, but let’s face it: looking up a lot of facts and being able to click on a buzzer really fast are not really the things that are the most exciting about the idea of an intelligent, human-like agent. They’re awfully cool, don’t get me wrong.

I think when we talk about commercial potential and replacing jobs, which you mentioned, I think those kinds of abilities to retrieve information really quickly, in a flexible way, that is something that can really lead to systems that are incredibly useful for human beings. Whether they are “strong AI” or not doesn’t matter. The philosophical stuff is fun to talk about, but there’s this other kind of practical, “What are we really going to build and what are we going to do with it?”

Right.

And it doesn’t require answering those questions.

Fair enough. In closing on all of that other part, I heard Ken Jennings speak at South by Southwest about it, and I will preface this by saying he’s incredibly gracious. He doesn’t say, “Well, it was rigged.” He did describe, though, that the buzzer situation was different, because that’s the one part that’s really hard to map. Because the buzzer’s the trick on Jeopardy, not the answers.

That’s right.

And that was all changed up a bit.

Ken is clearly the best human at the buzzer. He’s super smart, and he knows a ton of stuff, don’t get me wrong, I couldn’t win on Jeopardy. But I think it’s that buzzer that’s the difference. And so I think it would be really interesting to have a sort of Jeopardy contest in which the buzzer doesn’t matter, right? So, you just buzz in, and there’s some reasonable window in which to buzz in, and then it’s random who gets to answer the question, or maybe everybody gets to answer the question independently. A Jeopardy-like thing where that timed buzzing in isn’t part of it; it’s really the knowledge that’s the key. I suspect Watson would still do pretty well, and Ken would still do pretty well, but I’m not sure who would win in that case. It would depend a lot on the questions, I think.

So, you gave us a great segue just a minute ago when you said, “Is all of this talk about consciousness and awareness and self and Turing test and all that—does it matter?” And it sounded like you were saying, whether it does or doesn’t, there is plenty of exciting things that are coming down the pipe. So let’s talk about that. I would love to hear your thoughts on the state of the art. AI’s passed a bunch of milestones, like you said, there was chess, then Jeopardy, then AlphaGo, and then recently poker. What are some things, you think—without going to AGI which we’ll get to in a minute—we should look for? What’s the state of the art, and what are some things you think we’re going to see in a year, or two years, three years, that will dominate the headlines?

I think the most obvious thing is self-driving cars and autonomous vehicles, right? Which we already have out there on the roads doing a great deal. I drive a Volvo that can do lane following and can pretty much drive itself in many conditions. And that is really cool and really exciting. Is it intelligence? Well, no, not by the definitions we’ve just been talking about, but the technology to be able to do all of that very much came out of AI research and research directions.

But I guess there won’t be a watershed with that, like, in the way that one day we woke up and Lee Sedol had lost. I mean, won’t it be that in three years, the number one Justin Bieber song will have been written by an AI or something like that, where it’s like, “Wow, something just happened”?

Yeah, I guess I think it’s a little bit more like cell phones. Right? I mean, what was the moment for cell phones? I’m not sure there was one single moment.

Fair enough. That’s right.

It’s more of like a tipping point, and you can look back at it and say, “Oh, there’s this inflection point.” And I don’t know what it was for cell phones. I expect there was an inflection point when either cell phone technology became cheap enough, or cell tower coverage became prevalent enough that it made sense for people to have cell phones and start using them. And when that happened, it did happen very fast. I think it will be the same with self-driving cars.

It was very fast that cars started coming out with adaptive cruise control. We’ve had cruise control for a long time, where your car just keeps going at the same speed forever. But adaptive cruise control, where your car detects when there’s something in front of it and slows down or speeds up based on the conditions of the road, that happened really fast. It just came out and now lots of cars have that, and people are kind of used to it. GPS technology—I was just driving along the other day, and I was like, “Oh yeah, I’ve got a map in my car all the time.” And anytime I want to, I can say, “Hey, I’d like to go to this place,” and it will show me how to get to that place. We didn’t have that, and then within a pretty short span of time, we have that, and that’s an AI derivative also.

Right. I think that those are all incredibly good points. I would say with cell phones—I can remember in the mid ‘90s, the RAZR coming out, which was smaller, and it was like, “Wow.” You didn’t know you had it in your pocket. And then, of course, the iPhone was kind of a watershed thing.

Right. A smartphone.

Right. But you’re right, it’s a form of gradualism punctuated by a series of step functions up.

Definitely. Self-driving car technology, in particular, is like that, because it’s really a big ask to expect people to trust self-driving cars on the road. So there’s this process by which that will happen and is already happening, where individual bits of autonomous technology are being incorporated into human-driven cars. And meanwhile, there’s a lot of experimentation with self-driving cars under relatively controlled conditions. And at some point, there will be a tipping point, and I will buy a car, and I will be sitting in my car and it will take me to New York, and I won’t have to be in control.

Of course, one impediment to that is that whole thing where a vast majority of the people believe the statistical impossibility that they are above-average drivers.

That’s right.

I, on the other hand, believe I’m a below-average driver. So I’m going to be the first person in line—I’m a menace on the road. You want me off the road as soon as you can, and the technology probably is good enough for that already. I know prognostication is hard, and I guess cars are different, because I can’t get a free self-driving car with a two-year contract at $39.95 a month, right? So it’s a big capital shift, but do you have a sense—because I’m sure you’re up on all of this—of when you think the first fully autonomous car will happen? And then the most interesting thing: when will it be illegal not to drive a fully autonomous car?

I’m not quite sure how it will roll out. It may be that it’s in particular locations or particular regions first, but as for ordinary people being able to drive a self-driving car, I would say within ten years.

I noticed you slipped that, “I don’t know when it’s going to roll out” pun in there.

Pun not intended. You see, if my AI could recognize that as a pun… Humor is another thing that intelligent agents are not very good at, and I think that’ll be a long time coming.

Right. So you have just confirmed that I’m a human.

So, next question: you’ve mentioned strong AI, also called artificial general intelligence, that is, an intelligence as smart as a human. So, back to your earlier question of does it matter: we’re going to be able to do things like self-driving cars and all this really cool stuff without answering these philosophical questions, but I think the big question is, can we make an AGI?

Because if you look at what humans are good at doing, we’re good at transfer learning, where we take something we learn in one domain and map it to another one effortlessly. We are really good at learning from one data point: you could show a human one data point of something, and then a hundred photos, and no matter how you change the lighting or the angle, a person will go, “There, there, there, and there.” So, do you think that an AGI is the sum total of a series of weak AIs bolted together? Or is there some, I’m going to use a loaded word, “magic,” and obviously I don’t mean magic, but is there some hitherto unknown magic that we’re going to need to discover or invent?

I think hitherto unknown magic, you know, using the word “magic” cautiously. I think there are individual technologies that are really exciting and are letting us do a lot of things. So right now, deep learning is the big buzzword, and it is kind of cool. We’ve taken old neural net technology, and we’ve updated it with qualitatively different ways of thinking about essentially neural network learning that we couldn’t really think about before, because we didn’t have the hardware to be able to do it at the scale or with the kind of complexity that deep learning networks have now. So, deep learning is exciting. But deep learning, I think, is just fundamentally not suited to do this single point generalization that you’re talking about.

Big data is a buzzword, but personally, I’ve always been more interested in tiny data. Or maybe it’s big data in the service of tiny data: I experience lots and lots and lots of things, and by having all of that background knowledge at my disposal, I can do one-shot learning, because I can take that single instance and interpret it and understand what is relevant about that one single instance that I need to use to generalize to the next thing. One-shot learning works because we have vast experience, but that doesn’t mean that throwing vast experience at that one thing is, by itself, going to let us generalize from that single thing. I think we still really haven’t developed the cognitive reasoning frameworks that will let us take the power of deep learning and big data, and apply it in these new contexts in creative ways, using different levels of reasoning and abstraction. But I think that’s where we’re headed, and I think a lot of people are thinking about that.

So I’m very hopeful that the broad AI community, in its lushest, many-flowers-blooming way of exploring different approaches, is developing a lot of ideas that eventually are going to come together into a big intelligent reasoning framework, that will let us take all of the different kinds of technologies that we’ve built for special purpose algorithms, and put them together—not just bolt it together, but really integrate it into a more coherent, broad framework for AGI.

If you look at the human genome, it’s, in computer terms, 720MB, give or take. But a vast amount of that is useless, and then a vast amount of that we share with banana trees. And if you look at the part that’s uniquely human, which gives us our unique intelligence, it may be 4MB or 8MB; it’s really a small number. Yet in that little program are the instructions to make something that becomes an AGI. So do you take that to mean that there’s a secret, a trick—and again, I’m using words that I mean metaphorically—there’s something very simple we’re missing? Something you could write in a few lines of code? Maybe a short program that could make something that’s an AGI?

Yeah, we had a few hundred million years to evolve that. So, the length of something doesn’t necessarily mean that it’s simple. And I think I don’t know enough about genomics to talk really intelligently about this, but I do think that 4MB to 8MB that’s uniquely human interacts with everything else, with the rest of the genome, possibly with the parts that we think don’t do anything. Because there were parts of the genome that we thought didn’t do anything, but it turns out some of it does do something. It’s the dark matter of the genome. Just because we don’t know what it’s doing, I don’t know that that means that it’s not doing anything.

Well, that’s a really interesting point—the 4MB to 8MB may be highly compressed, to use the computer metaphor, and it may be decompressing to something that’s using all the rest. But let’s even say it takes 720MB, you’re still talking about something that will fit on an old CD-ROM, something smaller than most operating systems today.  
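For readers who want the back-of-the-envelope arithmetic behind the “720MB, give or take” figure, here is a minimal sketch, assuming roughly 3.1 billion base pairs and two bits per base; the exact numbers are approximations for illustration, not a claim about how the figure in the conversation was derived.

# Rough size of the human genome in "computer terms".
base_pairs = 3.1e9       # approximate length of the human genome
bits_per_base = 2        # four possible bases, so 2 bits each
total_megabytes = base_pairs * bits_per_base / 8 / 1e6
print(round(total_megabytes))  # about 775 MB, the same ballpark as "720MB, give or take"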

And I one hundred percent hear what you’re saying, which is nature has had a hundred million years to compress that, to make that really tight code. But, I guess the larger question I’m trying to ask is, do you think that an AGI may… The hope in AI had always been that, just like in the physical universe, there’s just a few laws that explain everything. Or is it that it’s like, no, we’re incredibly complicated, and it’s going to be this immense system that becomes a general intelligence, and it’s going to be of complexity we can’t wrap our heads around yet.

Gosh, I don’t know. I feel like I just can’t prognosticate that. I think if and when we have an AGI that we really think is intelligent, it probably will have an awful lot of components. The core that drives all of it may be, relatively speaking, fairly simple. But, if you think about how human intelligence works, we have lots and lots of modules. Right?

There’s this sort of core mechanism by which the brain processes information, that plays out in a lot of different ways, in different parts of the brain. We have the motor cortex, and we have the language cortex, and they’re all specialized. We have these specialized regions and specialized abilities. But they all use a common substrate or mechanism. And so when I think of the ultimate AI, I think of there being some sort of architecture that binds together a lot of different components that are doing different things. And it’s that architecture, that glue, that we haven’t really figured out how to think about yet.

There are cognitive architectures. There are people who work on designing cognitive architectures, and I think those are the precursors of what will ultimately become the architecture for intelligence. But I’m not sure we’re really working on that hard enough, or that we’ve made enough progress on that part of it. And it may be that the way that we get artificial intelligence ultimately is by building a really, really, really big deep learning neural network, which I would find maybe a little bit disappointing, because I feel like if that’s how we get there, we’re not really going to know what’s going on inside of it. Part of what brought me into the field of AI was really an interest in cognitive psychology, and trying to understand how the human brain works. So, maybe we can create another human-like intelligence by just kind of replicating the human brain. But I, personally, just from my own research perspective, wouldn’t find that especially satisfying, because it’s really hard to understand what’s going on in the human brain. And it’s hard to understand what’s going on even in any single deep learning network that can do visual processing or anything like that.

I think that in order for us to really adopt these intelligence systems and embrace them and trust them and be willing to use them, we’ll have to find ways for them to be more explainable and more understandable to human beings. Even if we go about replicating human intelligence in that way, I still think we need to be thinking about understandability and how it really works and how we extract meaning.

That’s really fascinating. So you’re saying if we made this big system that was huge and studied data, it’s kind of just brute force. We don’t have anything elegant about that. It doesn’t tell us anything about ourselves.

Yeah.

So my last theoretical question, and then I’d love to talk about jobs. You said at the very beginning that consciousness may be beyond our grasp, that somehow we’re too close to it, or it may be something we can’t agree on, we can’t measure, we can’t tell in others, and all of that. Is it possible that the same is true of a general intelligence? That in the end, this hope of yours that you said brought you into the field, that it’s going to give us deep insights into ourselves, actually isn’t possible?

Well, I mean, maybe. I don’t know. I think that we’ve already gained a lot of insight into ourselves, and because we’re humans, we’re curious. So if we build intelligent agents without fully understanding how they work or what they do, then maybe we’ll work side by side with them to understand each other. I don’t think we’re ever going to stop asking those questions, whether we get to some level of intelligent agents before then or after then. Questions about the universe are always going to be with us.

Onto the question that most people in their day-to-day lives worry about. They don’t worry as much about killer robots as they do about job-killing robots. What do you think will be the effect? So, you know the setup. You know both sides of this. Is artificial intelligence something brand new that replaces people, and it’s going to reach this critical velocity where it can learn things faster than us and eventually just surpass us in all fields? Or is it like other disruptive technologies—arguably as disruptive as the mechanization of industry, the harnessing of steam power, of electricity—that came and went and never, ever budged unemployment even one iota, because people learned, almost instantly, how to use these new technologies to increase their own productivity? Which of those two, or a third choice, do you think is most likely?

I’m not a believer in the singularity. I don’t see that happening—that these intelligent agents are going to surpass us and make us completely superfluous, or let us upload our brains into cyberspace or turn us into The Matrix. It could happen. I don’t rule it out, but that’s not what I think is most likely. What I really think is that this is like other technologies. It’s like the invention of the car or the television or the assembly line. If we use it correctly, it enhances human productivity, and it lets us create value at less human cost.

The question is not a scientific question or a technological question. The question is really a political question of how are we, as a society, going to decide to use that extra productivity? And unfortunately, in the past, we’ve often allowed that extra productivity to be channeled into the hands of a very few people, so that we just increased wealth disparity, and the people at the bottom of the economic pile have their jobs taken away. So they’re out of work, but more importantly, the benefit that’s being created by these new technologies isn’t benefiting them. And I think that we can choose to think differently about how we distribute the value that we get out of these new technologies.

The other thing is, I think that as you automate various kinds of activities, the economy transforms itself. And we don’t know exactly how that is going to happen, and it would have been hard to predict before any historical technological disruption, right? You invent cars. Well, what happens to all the people who took care of the horses before? Something happened to them. That’s a big industry that’s gone. When we automate truck driving, this is going to be extremely disruptive, because truck driver is one of the most common jobs in most of our country, at least. So, what happens to the people who were truck drivers? It turns out that you’re automating some parts of that job, but not all of it, because a truck driver doesn’t just sit at the wheel of the truck and drive it down the road. The truck driver also loads and offloads and interacts with people at either end. So, maybe the truck driver job becomes more of a sales job, you know, there’s fewer of them, but they’re doing different things. Or maybe it’s supplanted by different kinds of service roles.

I think we’re becoming more and more of a service economy, and that’s partly because of automation. We always need more productivity. There’s always things that human society wants. And if we get some of those things with less human effort, that should let us create more of other things. I think we could use this productivity and support more art. That would be an amazing, transformational, twenty-first century kind of thing to do. I look at our current politics and our current society, and I’m not sure that enough people are thinking that way, that we can think about how to use these wonderful technologies to benefit everybody. I’m not sure that’s where we’re headed right now.

Let’s look at that. So there’s a wide range of options, and everybody’s going to be familiar with them all. On the one hand, you could say, you know, Facebook and Google made twelve billionaires between them. Why don’t we just take their money and give it to other people? All the way to the other extreme that says, look, all those truck drivers, or their corollaries, in the past, nobody in a top-down, heavy handed way reassigned them to different jobs. What happened was the market did a really good job of allocating technology, creating jobs, and recruiting them. So those would be two incredibly extreme positions. And then there’s this whole road in between where you’d say, well, we need more education. We need to help make it easier for people to become productive again. Where on that spectrum do you land? What do you think? What specific meat would you put on those bones?

I think taxes are not an inherently bad thing. Taxes are how we run our society, and our society is what protects people and enables people to invent things like Google. If we didn’t have taxes, and we didn’t have any government services, it would be extremely difficult for human society to invent things like Google, because to invent things like that requires collaboration, it requires infrastructure; it requires the support of people around you to make that happen. You couldn’t have Google if you didn’t have the Internet. And the Internet exists because the government invested in the Internet, and the government could invest in the Internet because we pay taxes to the government to create collective infrastructure. I think there’s always going to be a tension between how high should taxes be and how much should you tax the wealthy—how regressive, how progressive? Estate taxes; should you be able to build up a dynasty and pass along all of your wealth to your children? I have opinions about some of that, but there’s no right answer. It changes over time. But I do think that the reason that we come together as human beings to create governments and create societies is because we want to have some ability to have a protected place where we can pursue our individual goals. I want to be able to drive to and from my job on roads that are good, and have this interview with you through an Internet connection that’s maintained, and not to have marauding hordes steal my car while I’m in here. You know, we want safety and security and shared infrastructure. And I think the technology that we’re creating should let us do a better job at having that shared infrastructure and basic ability for people to live happy and productive lives.

So I don’t think that just taking money from rich people and giving it to poor people is the right way to do that, but I do think investing in a better society makes a lot of sense. We have horribly decaying infrastructure in much of the country. So, doesn’t it make sense to take some of the capital that’s created by technology advances and use it to improve the infrastructure in the country and improve health care for people?

Right. And of course the countervailing factor is, do all of the above without diminishing people’s incentives to work hard and found these companies that they created, and that’s the historical tension. Well, I would like to close with one question for you which is: are you optimistic about the future or pessimistic or how would you answer that?

I’m incredibly optimistic. I mean, you know, I’m pessimistic about individual things on individual days, but I think, collectively, we have made incredible strides in technology, and in making people’s quality of life better.

I think we could do a better job. There’s places where people don’t have the education or don’t have the infrastructure or don’t have access to jobs or technology. I think we have real issues with diversity in technology, both in creating technology and in benefiting from technology. I’m very, very concerned about the continuing under-representation of women and minority groups in computing and technology. And the reason for that is partly because I think it’s just socially unjust to not have everybody equally benefiting from good jobs, from the benefits of technology. But it’s also because the technology solutions that we create are influenced by the people who are creating them. When we have a very limited subset of the population creating technology, there’s a lot of evidence that shows that the technology is not as robust, and doesn’t serve as broad a population of users as technology that’s created by diverse teams of engineers. I’d love to see more women coming into computer science. I’d love to see more African Americans and Hispanics coming into computer science. That’s something I work on a lot. It’s something I think matters a lot to our future. But, I think we’re doing the right things in those areas, and people care about these things, and we’re pushing forward.

There’s a lot of really exciting stuff happening in the AI world right now, and it’s a great time to be an AI scientist because people talk about AI. I walk down the street, or I sit at Panera, and I hear people talking about the latest AI solution for this thing or that—it’s become a common term. Sometimes, I think it’s a little overused, because we sort of use it for anything that seems kind of cool, but that’s OK. I think we can use AI for anything that seems pretty cool, and I don’t think that hurts anything.

All right. Well, that’s a great place to end it. I want to thank you so much for covering this incredibly wide range of topics. This was great fun and very informative. Thank you for your time.

Yeah, thank you.

Byron explores issues around artificial intelligence and conscious computers in his upcoming book The Fourth Age, to be published in April by Atria, an imprint of Simon & Schuster. Pre-order a copy here.
